/*	$NetBSD: if_wm.c,v 1.248 2013/04/21 19:59:41 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.248 2013/04/21 19:59:41 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

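/*
 * Editor's illustrative sketch (not compiled): DPRINTF() takes one of the
 * WM_DEBUG_* class bits plus a parenthesized printf() argument list, so
 * every call vanishes entirely when WM_DEBUG is not defined:
 */
#if 0
	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
	    device_xname(sc->sc_dev)));
#endif
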
/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

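/*
 * Editor's illustrative sketch (not compiled): the ring sizes above are
 * powers of two, so WM_NEXTTX()/WM_NEXTTXS() wrap the index with a cheap
 * AND instead of a modulo.  With WM_NTXDESC_82544 (4096) the mask is
 * 0xfff:
 */
#if 0
	int last = WM_NTXDESC(sc) - 1;	/* 4095 on >= 82544 */
	int next = WM_NEXTTX(sc, last);	/* (4095 + 1) & 0xfff == 0 */
#endif
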
#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

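/*
 * Editor's illustrative sketch (not compiled): WM_CDTXOFF()/WM_CDRXOFF()
 * turn a descriptor index into a byte offset within the control-data
 * clump, which is what lets a single DMA map cover both rings:
 */
#if 0
	/* Byte offset of Tx descriptor 4 inside the clump, i.e.
	   offsetof(struct wm_control_data_82544, wdc_u.wcdu_txdescs[4]) */
	bus_size_t off = WM_CDTXOFF(4);
#endif
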
/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control segment */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

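/*
 * Editor's illustrative sketch (not compiled; m0..m2 are hypothetical
 * mbufs): sc_rxtailp always points at the m_next field to fill in next,
 * so appending a buffer of a multi-buffer (jumbo) frame is O(1):
 */
#if 0
	WM_RXCHAIN_RESET(sc);		/* sc_rxtailp = &sc_rxhead */
	WM_RXCHAIN_LINK(sc, m0);	/* sc_rxhead = m0 */
	WM_RXCHAIN_LINK(sc, m1);	/* m0->m_next = m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next = m2; tail = m2 */
#endif
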
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)

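/*
 * Editor's illustrative sketch (not compiled): a sync that would run past
 * the end of the ring is issued in two pieces.  On a 256-descriptor ring,
 * syncing 8 descriptors starting at index 252 covers 252..255 first and
 * then 0..3:
 */
#if 0
	WM_CDTXSYNC(sc, 252, 8, BUS_DMASYNC_POSTREAD);
#endif
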
#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)

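/*
 * Editor's illustrative note on the 2-byte "scoot" above (not compiled):
 * an Ethernet header is 14 bytes, so starting the frame 2 bytes into the
 * buffer puts the IP header at offset 16, a 4-byte boundary:
 */
#if 0
	/* sc_align_tweak == 2:  2 (tweak) + 14 (ETHER_HDR_LEN) == 16 */
	KASSERT((sc->sc_align_tweak + ETHER_HDR_LEN) % 4 == 0);
#endif
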
static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

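/*
 * Editor's illustrative sketch (not compiled; the register name is an
 * assumption, not taken from this section): callers pass the controller
 * register, the byte offset within it and the data byte, and the helper
 * busy-waits up to SCTL_CTL_POLL_TIMEOUT polls (5us apart) for
 * SCTL_CTL_READY:
 */
#if 0
	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0, 0x00);
#endif
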
static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

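/*
 * Editor's illustrative sketch (not compiled; "rxd" is a hypothetical
 * descriptor pointer): wm_set_dma_addr() splits a bus address into the
 * little-endian low/high words the descriptor expects; on platforms with
 * a 32-bit bus_addr_t the high word is simply 0.
 */
#if 0
	/* 0x123456780 -> wa_low = 0x23456780, wa_high = 0x1 */
	wm_set_dma_addr(&rxd->wrx_addr, (bus_addr_t)0x123456780ULL);
#endif
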
static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
	    || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	wm_get_wakeup(sc);

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO;
			 * that's not a problem, because the newer chips
			 * don't have this bug.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks somewhat like it hasn't
			 * been configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit, 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
				NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment.
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
		    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
		    sc->sc_cd_rseg, sc->sc_cd_size,
		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

1482 	/*
1483 	 * Create the transmit buffer DMA maps.
1484 	 */
1485 	WM_TXQUEUELEN(sc) =
1486 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1487 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1488 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1489 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1490 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1491 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1492 			aprint_error_dev(sc->sc_dev,
1493 			    "unable to create Tx DMA map %d, error = %d\n",
1494 			    i, error);
1495 			goto fail_4;
1496 		}
1497 	}
1498 
1499 	/*
1500 	 * Create the receive buffer DMA maps.
1501 	 */
1502 	for (i = 0; i < WM_NRXDESC; i++) {
1503 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1504 			    MCLBYTES, 0, 0,
1505 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1506 			aprint_error_dev(sc->sc_dev,
1507 			    "unable to create Rx DMA map %d, error = %d\n",
1508 			    i, error);
1509 			goto fail_5;
1510 		}
1511 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1512 	}
1513 
1514 	/* Clear interesting stat counters (reading clears them) */
1515 	CSR_READ(sc, WMREG_COLC);
1516 	CSR_READ(sc, WMREG_RXERRC);
1517 
1518 	/* Switch PHY control from SMBus to PCIe */
1519 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
1520 		wm_smbustopci(sc);
1521 
1522 	/*
1523 	 * Reset the chip to a known state.
1524 	 */
1525 	wm_reset(sc);
1526 
1527 	switch (sc->sc_type) {
1528 	case WM_T_82571:
1529 	case WM_T_82572:
1530 	case WM_T_82573:
1531 	case WM_T_82574:
1532 	case WM_T_82583:
1533 	case WM_T_80003:
1534 	case WM_T_ICH8:
1535 	case WM_T_ICH9:
1536 	case WM_T_ICH10:
1537 	case WM_T_PCH:
1538 	case WM_T_PCH2:
1539 		if (wm_check_mng_mode(sc) != 0)
1540 			wm_get_hw_control(sc);
1541 		break;
1542 	default:
1543 		break;
1544 	}
1545 
1546 	/*
1547 	 * Get some information about the EEPROM.
1548 	 */
1549 	switch (sc->sc_type) {
1550 	case WM_T_82542_2_0:
1551 	case WM_T_82542_2_1:
1552 	case WM_T_82543:
1553 	case WM_T_82544:
1554 		/* Microwire */
1555 		sc->sc_ee_addrbits = 6;
1556 		break;
1557 	case WM_T_82540:
1558 	case WM_T_82545:
1559 	case WM_T_82545_3:
1560 	case WM_T_82546:
1561 	case WM_T_82546_3:
1562 		/* Microwire */
1563 		reg = CSR_READ(sc, WMREG_EECD);
1564 		if (reg & EECD_EE_SIZE)
1565 			sc->sc_ee_addrbits = 8;
1566 		else
1567 			sc->sc_ee_addrbits = 6;
1568 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1569 		break;
1570 	case WM_T_82541:
1571 	case WM_T_82541_2:
1572 	case WM_T_82547:
1573 	case WM_T_82547_2:
1574 		reg = CSR_READ(sc, WMREG_EECD);
1575 		if (reg & EECD_EE_TYPE) {
1576 			/* SPI */
1577 			wm_set_spiaddrbits(sc);
1578 		} else
1579 			/* Microwire */
1580 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1581 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1582 		break;
1583 	case WM_T_82571:
1584 	case WM_T_82572:
1585 		/* SPI */
1586 		wm_set_spiaddrbits(sc);
1587 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1588 		break;
1589 	case WM_T_82573:
1590 	case WM_T_82574:
1591 	case WM_T_82583:
1592 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1593 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1594 		else {
1595 			/* SPI */
1596 			wm_set_spiaddrbits(sc);
1597 		}
1598 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1599 		break;
1600 	case WM_T_82575:
1601 	case WM_T_82576:
1602 	case WM_T_82580:
1603 	case WM_T_82580ER:
1604 	case WM_T_I350:
1605 	case WM_T_80003:
1606 		/* SPI */
1607 		wm_set_spiaddrbits(sc);
1608 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1609 		break;
1610 	case WM_T_ICH8:
1611 	case WM_T_ICH9:
1612 	case WM_T_ICH10:
1613 	case WM_T_PCH:
1614 	case WM_T_PCH2:
1615 		/* FLASH */
1616 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1617 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1618 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1619 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1620 			aprint_error_dev(sc->sc_dev,
1621 			    "can't map FLASH registers\n");
1622 			return;
1623 		}
1624 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1625 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1626 						ICH_FLASH_SECTOR_SIZE;
1627 		sc->sc_ich8_flash_bank_size =
1628 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1629 		sc->sc_ich8_flash_bank_size -=
1630 		    (reg & ICH_GFPREG_BASE_MASK);
1631 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1632 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
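		/*
		 * Worked example of the GFPREG math above: a base field
		 * of 1 and a limit field of 2 describe a region of
		 * (2 + 1 - 1) = 2 flash sectors.  Multiplying by
		 * ICH_FLASH_SECTOR_SIZE gives the byte size of both NVM
		 * banks together; dividing by 2 * sizeof(uint16_t)
		 * halves that to a single bank and converts bytes to
		 * the 16-bit words the EEPROM code works in.
		 */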
1633 		break;
1634 	case WM_T_I210:
1635 	case WM_T_I211:
1636 #if 1
1637 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1638 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1639 #endif
1640 		break;
1641 	default:
1642 		break;
1643 	}
1644 
1645 	/*
1646 	 * Defer printing the EEPROM type until after verifying the checksum.
1647 	 * This allows the EEPROM type to be printed correctly in the case
1648 	 * that no EEPROM is attached.
1649 	 */
1650 	/*
1651 	 * Validate the EEPROM checksum. If the checksum fails, flag
1652 	 * this for later, so we can fail future reads from the EEPROM.
1653 	 */
1654 	if (wm_validate_eeprom_checksum(sc)) {
1655 		/*
1656 		 * Try the read again, because some PCI-e parts fail
1657 		 * the first check while the link is in a sleep state.
1658 		 */
1659 		if (wm_validate_eeprom_checksum(sc))
1660 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1661 	}
1662 
1663 	/* Set device properties (macflags) */
1664 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1665 
1666 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1667 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1668 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1669 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1670 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1671 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1672 	} else {
1673 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1674 			eetype = "SPI";
1675 		else
1676 			eetype = "MicroWire";
1677 		aprint_verbose_dev(sc->sc_dev,
1678 		    "%u word (%d address bits) %s EEPROM\n",
1679 		    1U << sc->sc_ee_addrbits,
1680 		    sc->sc_ee_addrbits, eetype);
1681 	}
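	/*
	 * (E.g. a Microwire part with 6 address bits reports as a
	 * "64 word (6 address bits) MicroWire EEPROM", the word count
	 * being 1U << sc_ee_addrbits.)
	 */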
1682 
1683 	/*
1684 	 * Read the Ethernet address from the EEPROM if it was not
1685 	 * already found in the device properties.
1686 	 */
1687 	ea = prop_dictionary_get(dict, "mac-address");
1688 	if (ea != NULL) {
1689 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1690 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1691 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1692 	} else {
1693 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1694 			aprint_error_dev(sc->sc_dev,
1695 			    "unable to read Ethernet address\n");
1696 			return;
1697 		}
1698 	}
1699 
1700 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1701 	    ether_sprintf(enaddr));
1702 
1703 	/*
1704 	 * Read the config info from the EEPROM, and set up various
1705 	 * bits in the control registers based on their contents.
1706 	 */
1707 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1708 	if (pn != NULL) {
1709 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1710 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1711 	} else {
1712 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1713 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1714 			return;
1715 		}
1716 	}
1717 
1718 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1719 	if (pn != NULL) {
1720 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1721 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1722 	} else {
1723 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1724 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1725 			return;
1726 		}
1727 	}
1728 
1729 	/* check for WM_F_WOL */
1730 	switch (sc->sc_type) {
1731 	case WM_T_82542_2_0:
1732 	case WM_T_82542_2_1:
1733 	case WM_T_82543:
1734 		/* dummy? */
1735 		eeprom_data = 0;
1736 		apme_mask = EEPROM_CFG3_APME;
1737 		break;
1738 	case WM_T_82544:
1739 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1740 		eeprom_data = cfg2;
1741 		break;
1742 	case WM_T_82546:
1743 	case WM_T_82546_3:
1744 	case WM_T_82571:
1745 	case WM_T_82572:
1746 	case WM_T_82573:
1747 	case WM_T_82574:
1748 	case WM_T_82583:
1749 	case WM_T_80003:
1750 	default:
1751 		apme_mask = EEPROM_CFG3_APME;
1752 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1753 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1754 		break;
1755 	case WM_T_82575:
1756 	case WM_T_82576:
1757 	case WM_T_82580:
1758 	case WM_T_82580ER:
1759 	case WM_T_I350:
1760 	case WM_T_ICH8:
1761 	case WM_T_ICH9:
1762 	case WM_T_ICH10:
1763 	case WM_T_PCH:
1764 	case WM_T_PCH2:
1765 		/* XXX The funcid should be checked on some devices */
1766 		apme_mask = WUC_APME;
1767 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1768 		break;
1769 	}
1770 
1771 	/* Set the WM_F_WOL flag once the EEPROM/WUC data has been gathered */
1772 	if ((eeprom_data & apme_mask) != 0)
1773 		sc->sc_flags |= WM_F_WOL;
1774 #ifdef WM_DEBUG
1775 	if ((sc->sc_flags & WM_F_WOL) != 0)
1776 		printf("WOL\n");
1777 #endif
1778 
1779 	/*
1780 	 * XXX need special handling for some multiple-port cards
1781 	 * to disable a particular port.
1782 	 */
1783 
1784 	if (sc->sc_type >= WM_T_82544) {
1785 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1786 		if (pn != NULL) {
1787 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1788 			swdpin = (uint16_t) prop_number_integer_value(pn);
1789 		} else {
1790 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1791 				aprint_error_dev(sc->sc_dev,
1792 				    "unable to read SWDPIN\n");
1793 				return;
1794 			}
1795 		}
1796 	}
1797 
1798 	if (cfg1 & EEPROM_CFG1_ILOS)
1799 		sc->sc_ctrl |= CTRL_ILOS;
1800 	if (sc->sc_type >= WM_T_82544) {
1801 		sc->sc_ctrl |=
1802 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1803 		    CTRL_SWDPIO_SHIFT;
1804 		sc->sc_ctrl |=
1805 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1806 		    CTRL_SWDPINS_SHIFT;
1807 	} else {
1808 		sc->sc_ctrl |=
1809 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1810 		    CTRL_SWDPIO_SHIFT;
1811 	}
1812 
1813 #if 0
1814 	if (sc->sc_type >= WM_T_82544) {
1815 		if (cfg1 & EEPROM_CFG1_IPS0)
1816 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1817 		if (cfg1 & EEPROM_CFG1_IPS1)
1818 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1819 		sc->sc_ctrl_ext |=
1820 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1821 		    CTRL_EXT_SWDPIO_SHIFT;
1822 		sc->sc_ctrl_ext |=
1823 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1824 		    CTRL_EXT_SWDPINS_SHIFT;
1825 	} else {
1826 		sc->sc_ctrl_ext |=
1827 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1828 		    CTRL_EXT_SWDPIO_SHIFT;
1829 	}
1830 #endif
1831 
1832 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1833 #if 0
1834 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1835 #endif
1836 
1837 	/*
1838 	 * Set up some register offsets that are different between
1839 	 * the i82542 and the i82543 and later chips.
1840 	 */
1841 	if (sc->sc_type < WM_T_82543) {
1842 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1843 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1844 	} else {
1845 		sc->sc_rdt_reg = WMREG_RDT;
1846 		sc->sc_tdt_reg = WMREG_TDT;
1847 	}
1848 
1849 	if (sc->sc_type == WM_T_PCH) {
1850 		uint16_t val;
1851 
1852 		/* Save the NVM K1 bit setting */
1853 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1854 
1855 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1856 			sc->sc_nvm_k1_enabled = 1;
1857 		else
1858 			sc->sc_nvm_k1_enabled = 0;
1859 	}
1860 
1861 	/*
1862 	 * Determine whether we're in TBI, GMII or SGMII mode, and
1863 	 * initialize the media structures accordingly.
1864 	 */
1865 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1866 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1867 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_82573
1868 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1869 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1870 		wm_gmii_mediainit(sc, wmp->wmp_product);
1871 	} else if (sc->sc_type < WM_T_82543 ||
1872 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1873 		if (wmp->wmp_flags & WMP_F_1000T)
1874 			aprint_error_dev(sc->sc_dev,
1875 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1876 		wm_tbi_mediainit(sc);
1877 	} else {
1878 		switch (sc->sc_type) {
1879 		case WM_T_82575:
1880 		case WM_T_82576:
1881 		case WM_T_82580:
1882 		case WM_T_82580ER:
1883 		case WM_T_I350:
1884 		case WM_T_I210:
1885 		case WM_T_I211:
1886 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1887 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1888 			case CTRL_EXT_LINK_MODE_SGMII:
1889 				aprint_verbose_dev(sc->sc_dev, "SGMII\n");
1890 				sc->sc_flags |= WM_F_SGMII;
1891 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1892 				    reg | CTRL_EXT_I2C_ENA);
1893 				wm_gmii_mediainit(sc, wmp->wmp_product);
1894 				break;
1895 			case CTRL_EXT_LINK_MODE_1000KX:
1896 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1897 				aprint_verbose_dev(sc->sc_dev, "1000KX or SERDES\n");
1898 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1899 				    reg | CTRL_EXT_I2C_ENA);
1900 				panic("not supported yet\n");
1901 				break;
1902 			case CTRL_EXT_LINK_MODE_GMII:
1903 			default:
1904 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1905 				    reg & ~CTRL_EXT_I2C_ENA);
1906 				wm_gmii_mediainit(sc, wmp->wmp_product);
1907 				break;
1908 			}
1909 			break;
1910 		default:
1911 			if (wmp->wmp_flags & WMP_F_1000X)
1912 				aprint_error_dev(sc->sc_dev,
1913 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1914 			wm_gmii_mediainit(sc, wmp->wmp_product);
1915 		}
1916 	}
1917 
1918 	ifp = &sc->sc_ethercom.ec_if;
1919 	xname = device_xname(sc->sc_dev);
1920 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1921 	ifp->if_softc = sc;
1922 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1923 	ifp->if_ioctl = wm_ioctl;
1924 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1925 		ifp->if_start = wm_nq_start;
1926 	else
1927 		ifp->if_start = wm_start;
1928 	ifp->if_watchdog = wm_watchdog;
1929 	ifp->if_init = wm_init;
1930 	ifp->if_stop = wm_stop;
1931 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1932 	IFQ_SET_READY(&ifp->if_snd);
1933 
1934 	/* Check for jumbo frame */
1935 	switch (sc->sc_type) {
1936 	case WM_T_82573:
1937 		/* XXX limited to 9234 if ASPM is disabled */
1938 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1939 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1940 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1941 		break;
1942 	case WM_T_82571:
1943 	case WM_T_82572:
1944 	case WM_T_82574:
1945 	case WM_T_82575:
1946 	case WM_T_82576:
1947 	case WM_T_82580:
1948 	case WM_T_82580ER:
1949 	case WM_T_I350:
1950 	case WM_T_I210:
1951 	case WM_T_I211:
1952 	case WM_T_80003:
1953 	case WM_T_ICH9:
1954 	case WM_T_ICH10:
1955 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1956 		/* XXX limited to 9234 */
1957 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1958 		break;
1959 	case WM_T_PCH:
1960 		/* XXX limited to 4096 */
1961 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1962 		break;
1963 	case WM_T_82542_2_0:
1964 	case WM_T_82542_2_1:
1965 	case WM_T_82583:
1966 	case WM_T_ICH8:
1967 		/* No support for jumbo frame */
1968 		break;
1969 	default:
1970 		/* ETHER_MAX_LEN_JUMBO */
1971 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1972 		break;
1973 	}
1974 
1975 	/*
1976 	 * If we're an i82543 or greater, we can support VLANs.
1977 	 */
1978 	if (sc->sc_type >= WM_T_82543)
1979 		sc->sc_ethercom.ec_capabilities |=
1980 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
1981 
1982 	/*
1983 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
1984 	 * on i82543 and later.
1985 	 */
1986 	if (sc->sc_type >= WM_T_82543) {
1987 		ifp->if_capabilities |=
1988 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
1989 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1990 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1991 		    IFCAP_CSUM_TCPv6_Tx |
1992 		    IFCAP_CSUM_UDPv6_Tx;
1993 	}
1994 
1995 	/*
1996 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
1997 	 *
1998 	 *	82541GI (8086:1076) ... no
1999 	 *	82572EI (8086:10b9) ... yes
2000 	 */
2001 	if (sc->sc_type >= WM_T_82571) {
2002 		ifp->if_capabilities |=
2003 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2004 	}
2005 
2006 	/*
2007 	 * If we're an i82544 or greater (except i82547), we can do
2008 	 * TCP segmentation offload.
2009 	 */
2010 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2011 		ifp->if_capabilities |= IFCAP_TSOv4;
2012 	}
2013 
2014 	if (sc->sc_type >= WM_T_82571) {
2015 		ifp->if_capabilities |= IFCAP_TSOv6;
2016 	}
2017 
2018 	/*
2019 	 * Attach the interface.
2020 	 */
2021 	if_attach(ifp);
2022 	ether_ifattach(ifp, enaddr);
2023 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2024 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2025 
2026 #ifdef WM_EVENT_COUNTERS
2027 	/* Attach event counters. */
2028 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2029 	    NULL, xname, "txsstall");
2030 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2031 	    NULL, xname, "txdstall");
2032 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2033 	    NULL, xname, "txfifo_stall");
2034 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2035 	    NULL, xname, "txdw");
2036 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2037 	    NULL, xname, "txqe");
2038 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2039 	    NULL, xname, "rxintr");
2040 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2041 	    NULL, xname, "linkintr");
2042 
2043 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2044 	    NULL, xname, "rxipsum");
2045 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2046 	    NULL, xname, "rxtusum");
2047 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2048 	    NULL, xname, "txipsum");
2049 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2050 	    NULL, xname, "txtusum");
2051 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2052 	    NULL, xname, "txtusum6");
2053 
2054 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2055 	    NULL, xname, "txtso");
2056 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2057 	    NULL, xname, "txtso6");
2058 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2059 	    NULL, xname, "txtsopain");
2060 
2061 	for (i = 0; i < WM_NTXSEGS; i++) {
2062 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2063 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2064 		    NULL, xname, wm_txseg_evcnt_names[i]);
2065 	}
2066 
2067 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2068 	    NULL, xname, "txdrop");
2069 
2070 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2071 	    NULL, xname, "tu");
2072 
2073 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2074 	    NULL, xname, "tx_xoff");
2075 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2076 	    NULL, xname, "tx_xon");
2077 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2078 	    NULL, xname, "rx_xoff");
2079 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2080 	    NULL, xname, "rx_xon");
2081 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2082 	    NULL, xname, "rx_macctl");
2083 #endif /* WM_EVENT_COUNTERS */
2084 
2085 	if (pmf_device_register(self, wm_suspend, wm_resume))
2086 		pmf_class_network_register(self, ifp);
2087 	else
2088 		aprint_error_dev(self, "couldn't establish power handler\n");
2089 
2090 	return;
2091 
2092 	/*
2093 	 * Free any resources we've allocated during the failed attach
2094 	 * attempt.  Do this in reverse order and fall through.
2095 	 */
2096  fail_5:
2097 	for (i = 0; i < WM_NRXDESC; i++) {
2098 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2099 			bus_dmamap_destroy(sc->sc_dmat,
2100 			    sc->sc_rxsoft[i].rxs_dmamap);
2101 	}
2102  fail_4:
2103 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2104 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2105 			bus_dmamap_destroy(sc->sc_dmat,
2106 			    sc->sc_txsoft[i].txs_dmamap);
2107 	}
2108 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2109  fail_3:
2110 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2111  fail_2:
2112 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2113 	    sc->sc_cd_size);
2114  fail_1:
2115 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2116  fail_0:
2117 	return;
2118 }
2119 
2120 static int
2121 wm_detach(device_t self, int flags __unused)
2122 {
2123 	struct wm_softc *sc = device_private(self);
2124 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2125 	int i, s;
2126 
2127 	s = splnet();
2128 	/* Stop the interface; callouts are halted inside wm_stop(). */
2129 	wm_stop(ifp, 1);
2130 	splx(s);
2131 
2132 	pmf_device_deregister(self);
2133 
2134 	/* Tell the firmware about the release */
2135 	wm_release_manageability(sc);
2136 	wm_release_hw_control(sc);
2137 
2138 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2139 
2140 	/* Delete all remaining media. */
2141 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2142 
2143 	ether_ifdetach(ifp);
2144 	if_detach(ifp);
2145 
2147 	/* Unload RX dmamaps and free mbufs */
2148 	wm_rxdrain(sc);
2149 
2150 	/* Free the DMA maps; this mirrors the failure path at the end of wm_attach() */
2151 	for (i = 0; i < WM_NRXDESC; i++) {
2152 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2153 			bus_dmamap_destroy(sc->sc_dmat,
2154 			    sc->sc_rxsoft[i].rxs_dmamap);
2155 	}
2156 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2157 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2158 			bus_dmamap_destroy(sc->sc_dmat,
2159 			    sc->sc_txsoft[i].txs_dmamap);
2160 	}
2161 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2162 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2163 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2164 	    sc->sc_cd_size);
2165 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2166 
2167 	/* Disestablish the interrupt handler */
2168 	if (sc->sc_ih != NULL) {
2169 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2170 		sc->sc_ih = NULL;
2171 	}
2172 
2173 	/* Unmap the registers */
2174 	if (sc->sc_ss) {
2175 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2176 		sc->sc_ss = 0;
2177 	}
2178 
2179 	if (sc->sc_ios) {
2180 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2181 		sc->sc_ios = 0;
2182 	}
2183 
2184 	return 0;
2185 }
2186 
2187 /*
2188  * wm_tx_offload:
2189  *
2190  *	Set up TCP/IP checksumming parameters for the
2191  *	specified packet.
2192  */
2193 static int
2194 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2195     uint8_t *fieldsp)
2196 {
2197 	struct mbuf *m0 = txs->txs_mbuf;
2198 	struct livengood_tcpip_ctxdesc *t;
2199 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2200 	uint32_t ipcse;
2201 	struct ether_header *eh;
2202 	int offset, iphl;
2203 	uint8_t fields;
2204 
2205 	/*
2206 	 * XXX It would be nice if the mbuf pkthdr had offset
2207 	 * fields for the protocol headers.
2208 	 */
2209 
2210 	eh = mtod(m0, struct ether_header *);
2211 	switch (htons(eh->ether_type)) {
2212 	case ETHERTYPE_IP:
2213 	case ETHERTYPE_IPV6:
2214 		offset = ETHER_HDR_LEN;
2215 		break;
2216 
2217 	case ETHERTYPE_VLAN:
2218 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2219 		break;
2220 
2221 	default:
2222 		/*
2223 		 * Don't support this protocol or encapsulation.
2224 		 */
2225 		*fieldsp = 0;
2226 		*cmdp = 0;
2227 		return 0;
2228 	}
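	/*
	 * At this point "offset" is the offset of the IP header within
	 * the frame: 14 (ETHER_HDR_LEN) for a plain Ethernet frame, or
	 * 18 when a VLAN encapsulation header precedes it.
	 */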
2229 
2230 	if ((m0->m_pkthdr.csum_flags &
2231 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2232 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2233 	} else {
2234 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2235 	}
2236 	ipcse = offset + iphl - 1;
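	/*
	 * IPCSE is an inclusive end offset, hence the "- 1": for a
	 * 20-byte IPv4 header behind a 14-byte Ethernet header, the IP
	 * checksum coverage ends at byte 14 + 20 - 1 = 33.
	 */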
2237 
2238 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2239 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2240 	seg = 0;
2241 	fields = 0;
2242 
2243 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2244 		int hlen = offset + iphl;
2245 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2246 
2247 		if (__predict_false(m0->m_len <
2248 				    (hlen + sizeof(struct tcphdr)))) {
2249 			/*
2250 			 * TCP/IP headers are not in the first mbuf; we need
2251 			 * to do this the slow and painful way.  Let's just
2252 			 * hope this doesn't happen very often.
2253 			 */
2254 			struct tcphdr th;
2255 
2256 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2257 
2258 			m_copydata(m0, hlen, sizeof(th), &th);
2259 			if (v4) {
2260 				struct ip ip;
2261 
2262 				m_copydata(m0, offset, sizeof(ip), &ip);
2263 				ip.ip_len = 0;
2264 				m_copyback(m0,
2265 				    offset + offsetof(struct ip, ip_len),
2266 				    sizeof(ip.ip_len), &ip.ip_len);
2267 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2268 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2269 			} else {
2270 				struct ip6_hdr ip6;
2271 
2272 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2273 				ip6.ip6_plen = 0;
2274 				m_copyback(m0,
2275 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2276 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2277 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2278 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2279 			}
2280 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2281 			    sizeof(th.th_sum), &th.th_sum);
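			/*
			 * The pseudo-header checksum is deliberately
			 * seeded with a zero length (ip_len/ip6_plen
			 * cleared above): for TSO the hardware inserts
			 * the per-segment length itself, so including
			 * it here would double-count it.
			 */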
2282 
2283 			hlen += th.th_off << 2;
2284 		} else {
2285 			/*
2286 			 * TCP/IP headers are in the first mbuf; we can do
2287 			 * this the easy way.
2288 			 */
2289 			struct tcphdr *th;
2290 
2291 			if (v4) {
2292 				struct ip *ip =
2293 				    (void *)(mtod(m0, char *) + offset);
2294 				th = (void *)(mtod(m0, char *) + hlen);
2295 
2296 				ip->ip_len = 0;
2297 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2298 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2299 			} else {
2300 				struct ip6_hdr *ip6 =
2301 				    (void *)(mtod(m0, char *) + offset);
2302 				th = (void *)(mtod(m0, char *) + hlen);
2303 
2304 				ip6->ip6_plen = 0;
2305 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2306 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2307 			}
2308 			hlen += th->th_off << 2;
2309 		}
2310 
2311 		if (v4) {
2312 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2313 			cmdlen |= WTX_TCPIP_CMD_IP;
2314 		} else {
2315 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2316 			ipcse = 0;
2317 		}
2318 		cmd |= WTX_TCPIP_CMD_TSE;
2319 		cmdlen |= WTX_TCPIP_CMD_TSE |
2320 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2321 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2322 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2323 	}
2324 
2325 	/*
2326 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2327 	 * offload feature, if we load the context descriptor, we
2328 	 * MUST provide valid values for IPCSS and TUCSS fields.
2329 	 */
2330 
2331 	ipcs = WTX_TCPIP_IPCSS(offset) |
2332 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2333 	    WTX_TCPIP_IPCSE(ipcse);
2334 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2335 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2336 		fields |= WTX_IXSM;
2337 	}
2338 
2339 	offset += iphl;
2340 
2341 	if (m0->m_pkthdr.csum_flags &
2342 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2343 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2344 		fields |= WTX_TXSM;
2345 		tucs = WTX_TCPIP_TUCSS(offset) |
2346 		    WTX_TCPIP_TUCSO(offset +
2347 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2348 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2349 	} else if ((m0->m_pkthdr.csum_flags &
2350 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2351 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2352 		fields |= WTX_TXSM;
2353 		tucs = WTX_TCPIP_TUCSS(offset) |
2354 		    WTX_TCPIP_TUCSO(offset +
2355 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2356 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2357 	} else {
2358 		/* Just initialize it to a valid TCP context. */
2359 		tucs = WTX_TCPIP_TUCSS(offset) |
2360 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2361 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2362 	}
2363 
2364 	/* Fill in the context descriptor. */
2365 	t = (struct livengood_tcpip_ctxdesc *)
2366 	    &sc->sc_txdescs[sc->sc_txnext];
2367 	t->tcpip_ipcs = htole32(ipcs);
2368 	t->tcpip_tucs = htole32(tucs);
2369 	t->tcpip_cmdlen = htole32(cmdlen);
2370 	t->tcpip_seg = htole32(seg);
2371 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2372 
2373 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2374 	txs->txs_ndesc++;
2375 
2376 	*cmdp = cmd;
2377 	*fieldsp = fields;
2378 
2379 	return 0;
2380 }
2381 
2382 static void
2383 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2384 {
2385 	struct mbuf *m;
2386 	int i;
2387 
2388 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2389 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2390 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2391 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2392 		    m->m_data, m->m_len, m->m_flags);
2393 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2394 	    i, i == 1 ? "" : "s");
2395 }
2396 
2397 /*
2398  * wm_82547_txfifo_stall:
2399  *
2400  *	Callout used to wait for the 82547 Tx FIFO to drain,
2401  *	reset the FIFO pointers, and restart packet transmission.
2402  */
2403 static void
2404 wm_82547_txfifo_stall(void *arg)
2405 {
2406 	struct wm_softc *sc = arg;
2407 	int s;
2408 
2409 	s = splnet();
2410 
2411 	if (sc->sc_txfifo_stall) {
2412 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2413 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2414 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2415 			/*
2416 			 * Packets have drained.  Stop transmitter, reset
2417 			 * FIFO pointers, restart transmitter, and kick
2418 			 * the packet queue.
2419 			 */
2420 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2421 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2422 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2423 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2424 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2425 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2426 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2427 			CSR_WRITE_FLUSH(sc);
2428 
2429 			sc->sc_txfifo_head = 0;
2430 			sc->sc_txfifo_stall = 0;
2431 			wm_start(&sc->sc_ethercom.ec_if);
2432 		} else {
2433 			/*
2434 			 * Still waiting for packets to drain; try again in
2435 			 * another tick.
2436 			 */
2437 			callout_schedule(&sc->sc_txfifo_ch, 1);
2438 		}
2439 	}
2440 
2441 	splx(s);
2442 }
2443 
2444 static void
2445 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2446 {
2447 	uint32_t reg;
2448 
2449 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2450 
2451 	if (on != 0)
2452 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2453 	else
2454 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2455 
2456 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2457 }
2458 
2459 /*
2460  * wm_82547_txfifo_bugchk:
2461  *
2462  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2463  *	prevent enqueueing a packet that would wrap around the end
2464  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2465  *
2466  *	We do this by checking the amount of space before the end
2467  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2468  *	the Tx FIFO, wait for all remaining packets to drain, reset
2469  *	the internal FIFO pointers to the beginning, and restart
2470  *	transmission on the interface.
2471  */
2472 #define	WM_FIFO_HDR		0x10
2473 #define	WM_82547_PAD_LEN	0x3e0
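/*
 * Worked example of the check below: a 1500-byte packet rounds up to
 * len = roundup(1500 + 0x10, 0x10) = 1520 bytes.  With only 400 bytes
 * of space left before the end of the FIFO, 1520 >= 0x3e0 + 400
 * (= 1392) is true, so the driver stalls until the FIFO drains rather
 * than letting the packet wrap.
 */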
2474 static int
2475 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2476 {
2477 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2478 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2479 
2480 	/* Just return if already stalled. */
2481 	if (sc->sc_txfifo_stall)
2482 		return 1;
2483 
2484 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2485 		/* Stall only occurs in half-duplex mode. */
2486 		goto send_packet;
2487 	}
2488 
2489 	if (len >= WM_82547_PAD_LEN + space) {
2490 		sc->sc_txfifo_stall = 1;
2491 		callout_schedule(&sc->sc_txfifo_ch, 1);
2492 		return 1;
2493 	}
2494 
2495  send_packet:
2496 	sc->sc_txfifo_head += len;
2497 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2498 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2499 
2500 	return 0;
2501 }
2502 
2503 /*
2504  * wm_start:		[ifnet interface function]
2505  *
2506  *	Start packet transmission on the interface.
2507  */
2508 static void
2509 wm_start(struct ifnet *ifp)
2510 {
2511 	struct wm_softc *sc = ifp->if_softc;
2512 	struct mbuf *m0;
2513 	struct m_tag *mtag;
2514 	struct wm_txsoft *txs;
2515 	bus_dmamap_t dmamap;
2516 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2517 	bus_addr_t curaddr;
2518 	bus_size_t seglen, curlen;
2519 	uint32_t cksumcmd;
2520 	uint8_t cksumfields;
2521 
2522 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2523 		return;
2524 
2525 	/*
2526 	 * Remember the previous number of free descriptors.
2527 	 */
2528 	ofree = sc->sc_txfree;
2529 
2530 	/*
2531 	 * Loop through the send queue, setting up transmit descriptors
2532 	 * until we drain the queue, or use up all available transmit
2533 	 * descriptors.
2534 	 */
2535 	for (;;) {
2536 		/* Grab a packet off the queue. */
2537 		IFQ_POLL(&ifp->if_snd, m0);
2538 		if (m0 == NULL)
2539 			break;
2540 
2541 		DPRINTF(WM_DEBUG_TX,
2542 		    ("%s: TX: have packet to transmit: %p\n",
2543 		    device_xname(sc->sc_dev), m0));
2544 
2545 		/* Get a work queue entry. */
2546 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2547 			wm_txintr(sc);
2548 			if (sc->sc_txsfree == 0) {
2549 				DPRINTF(WM_DEBUG_TX,
2550 				    ("%s: TX: no free job descriptors\n",
2551 					device_xname(sc->sc_dev)));
2552 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2553 				break;
2554 			}
2555 		}
2556 
2557 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2558 		dmamap = txs->txs_dmamap;
2559 
2560 		use_tso = (m0->m_pkthdr.csum_flags &
2561 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2562 
2563 		/*
2564 		 * So says the Linux driver:
2565 		 * The controller does a simple calculation to make sure
2566 		 * there is enough room in the FIFO before initiating the
2567 		 * DMA for each buffer.  The calc is:
2568 		 *	4 = ceil(buffer len / MSS)
2569 		 * To make sure we don't overrun the FIFO, adjust the max
2570 		 * buffer len if the MSS drops.
2571 		 */
2572 		dmamap->dm_maxsegsz =
2573 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2574 		    ? m0->m_pkthdr.segsz << 2
2575 		    : WTX_MAX_LEN;
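		/*
		 * E.g. with the common Ethernet MSS of 1460 the map's
		 * maximum segment size becomes 1460 << 2 = 5840 bytes
		 * (assuming that is below WTX_MAX_LEN), so any one
		 * buffer spans at most ceil(5840 / 1460) = 4 MSS-sized
		 * chunks, as the calculation above requires.
		 */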
2576 
2577 		/*
2578 		 * Load the DMA map.  If this fails, the packet either
2579 		 * didn't fit in the allotted number of segments, or we
2580 		 * were short on resources.  For the too-many-segments
2581 		 * case, we simply report an error and drop the packet,
2582 		 * since we can't sanely copy a jumbo packet to a single
2583 		 * buffer.
2584 		 */
2585 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2586 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2587 		if (error) {
2588 			if (error == EFBIG) {
2589 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2590 				log(LOG_ERR, "%s: Tx packet consumes too many "
2591 				    "DMA segments, dropping...\n",
2592 				    device_xname(sc->sc_dev));
2593 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2594 				wm_dump_mbuf_chain(sc, m0);
2595 				m_freem(m0);
2596 				continue;
2597 			}
2598 			/*
2599 			 * Short on resources, just stop for now.
2600 			 */
2601 			DPRINTF(WM_DEBUG_TX,
2602 			    ("%s: TX: dmamap load failed: %d\n",
2603 			    device_xname(sc->sc_dev), error));
2604 			break;
2605 		}
2606 
2607 		segs_needed = dmamap->dm_nsegs;
2608 		if (use_tso) {
2609 			/* For sentinel descriptor; see below. */
2610 			segs_needed++;
2611 		}
2612 
2613 		/*
2614 		 * Ensure we have enough descriptors free to describe
2615 		 * the packet.  Note, we always reserve one descriptor
2616 		 * at the end of the ring due to the semantics of the
2617 		 * TDT register, plus one more in the event we need
2618 		 * to load offload context.
2619 		 */
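		/*
		 * (The permanently reserved slot exists because the
		 * ring counts as empty when TDT == TDH; if every slot
		 * could be filled, a full ring would be
		 * indistinguishable from an empty one.)
		 */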
2620 		if (segs_needed > sc->sc_txfree - 2) {
2621 			/*
2622 			 * Not enough free descriptors to transmit this
2623 			 * packet.  We haven't committed anything yet,
2624 			 * so just unload the DMA map, put the packet
2625 			 * back on the queue, and punt.  Notify the upper
2626 			 * layer that there are no more slots left.
2627 			 */
2628 			DPRINTF(WM_DEBUG_TX,
2629 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2630 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2631 			    segs_needed, sc->sc_txfree - 1));
2632 			ifp->if_flags |= IFF_OACTIVE;
2633 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2634 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2635 			break;
2636 		}
2637 
2638 		/*
2639 		 * Check for 82547 Tx FIFO bug.  We need to do this
2640 		 * once we know we can transmit the packet, since we
2641 		 * do some internal FIFO space accounting here.
2642 		 */
2643 		if (sc->sc_type == WM_T_82547 &&
2644 		    wm_82547_txfifo_bugchk(sc, m0)) {
2645 			DPRINTF(WM_DEBUG_TX,
2646 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2647 			    device_xname(sc->sc_dev)));
2648 			ifp->if_flags |= IFF_OACTIVE;
2649 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2650 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2651 			break;
2652 		}
2653 
2654 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2655 
2656 		/*
2657 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2658 		 */
2659 
2660 		DPRINTF(WM_DEBUG_TX,
2661 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2662 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2663 
2664 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2665 
2666 		/*
2667 		 * Store a pointer to the packet so that we can free it
2668 		 * later.
2669 		 *
2670 		 * Initially, we consider the number of descriptors the
2671 		 * packet uses to be the number of DMA segments.  This may be
2672 		 * incremented by 1 if we do checksum offload (a descriptor
2673 		 * is used to set the checksum context).
2674 		 */
2675 		txs->txs_mbuf = m0;
2676 		txs->txs_firstdesc = sc->sc_txnext;
2677 		txs->txs_ndesc = segs_needed;
2678 
2679 		/* Set up offload parameters for this packet. */
2680 		if (m0->m_pkthdr.csum_flags &
2681 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2682 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2683 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2684 			if (wm_tx_offload(sc, txs, &cksumcmd,
2685 					  &cksumfields) != 0) {
2686 				/* Error message already displayed. */
2687 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2688 				continue;
2689 			}
2690 		} else {
2691 			cksumcmd = 0;
2692 			cksumfields = 0;
2693 		}
2694 
2695 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2696 
2697 		/* Sync the DMA map. */
2698 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2699 		    BUS_DMASYNC_PREWRITE);
2700 
2701 		/*
2702 		 * Initialize the transmit descriptor.
2703 		 */
2704 		for (nexttx = sc->sc_txnext, seg = 0;
2705 		     seg < dmamap->dm_nsegs; seg++) {
2706 			for (seglen = dmamap->dm_segs[seg].ds_len,
2707 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2708 			     seglen != 0;
2709 			     curaddr += curlen, seglen -= curlen,
2710 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2711 				curlen = seglen;
2712 
2713 				/*
2714 				 * So says the Linux driver:
2715 				 * Work around for premature descriptor
2716 				 * write-backs in TSO mode.  Append a
2717 				 * 4-byte sentinel descriptor.
2718 				 */
2719 				if (use_tso &&
2720 				    seg == dmamap->dm_nsegs - 1 &&
2721 				    curlen > 8)
2722 					curlen -= 4;
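				/*
				 * The 4 bytes trimmed off here are
				 * emitted by the next loop iteration as
				 * a descriptor of their own -- the
				 * sentinel that segs_needed reserved an
				 * extra slot for above.
				 */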
2723 
2724 				wm_set_dma_addr(
2725 				    &sc->sc_txdescs[nexttx].wtx_addr,
2726 				    curaddr);
2727 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2728 				    htole32(cksumcmd | curlen);
2729 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2730 				    0;
2731 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2732 				    cksumfields;
2733 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2734 				lasttx = nexttx;
2735 
2736 				DPRINTF(WM_DEBUG_TX,
2737 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
2738 				     "len %#04zx\n",
2739 				    device_xname(sc->sc_dev), nexttx,
2740 				    (uint64_t)curaddr, curlen));
2741 			}
2742 		}
2743 
2744 		KASSERT(lasttx != -1);
2745 
2746 		/*
2747 		 * Set up the command byte on the last descriptor of
2748 		 * the packet.  If we're in the interrupt delay window,
2749 		 * delay the interrupt.
2750 		 */
2751 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2752 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2753 
2754 		/*
2755 		 * If VLANs are enabled and the packet has a VLAN tag, set
2756 		 * up the descriptor to encapsulate the packet for us.
2757 		 *
2758 		 * This is only valid on the last descriptor of the packet.
2759 		 */
2760 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2761 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2762 			    htole32(WTX_CMD_VLE);
2763 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2764 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2765 		}
2766 
2767 		txs->txs_lastdesc = lasttx;
2768 
2769 		DPRINTF(WM_DEBUG_TX,
2770 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2771 		    device_xname(sc->sc_dev),
2772 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2773 
2774 		/* Sync the descriptors we're using. */
2775 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2776 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2777 
2778 		/* Give the packet to the chip. */
2779 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2780 
2781 		DPRINTF(WM_DEBUG_TX,
2782 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2783 
2784 		DPRINTF(WM_DEBUG_TX,
2785 		    ("%s: TX: finished transmitting packet, job %d\n",
2786 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2787 
2788 		/* Advance the tx pointer. */
2789 		sc->sc_txfree -= txs->txs_ndesc;
2790 		sc->sc_txnext = nexttx;
2791 
2792 		sc->sc_txsfree--;
2793 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2794 
2795 		/* Pass the packet to any BPF listeners. */
2796 		bpf_mtap(ifp, m0);
2797 	}
2798 
2799 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2800 		/* No more slots; notify upper layer. */
2801 		ifp->if_flags |= IFF_OACTIVE;
2802 	}
2803 
2804 	if (sc->sc_txfree != ofree) {
2805 		/* Set a watchdog timer in case the chip flakes out. */
2806 		ifp->if_timer = 5;
2807 	}
2808 }
2809 
2810 /*
2811  * wm_nq_tx_offload:
2812  *
2813  *	Set up TCP/IP checksumming parameters for the
2814  *	specified packet, for NEWQUEUE devices
2815  */
2816 static int
2817 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2818     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2819 {
2820 	struct mbuf *m0 = txs->txs_mbuf;
2821 	struct m_tag *mtag;
2822 	uint32_t vl_len, mssidx, cmdc;
2823 	struct ether_header *eh;
2824 	int offset, iphl;
2825 
2826 	/*
2827 	 * XXX It would be nice if the mbuf pkthdr had offset
2828 	 * fields for the protocol headers.
2829 	 */
2830 	*cmdlenp = 0;
2831 	*fieldsp = 0;
2832 
2833 	eh = mtod(m0, struct ether_header *);
2834 	switch (htons(eh->ether_type)) {
2835 	case ETHERTYPE_IP:
2836 	case ETHERTYPE_IPV6:
2837 		offset = ETHER_HDR_LEN;
2838 		break;
2839 
2840 	case ETHERTYPE_VLAN:
2841 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2842 		break;
2843 
2844 	default:
2845 		/*
2846 		 * Don't support this protocol or encapsulation.
2847 		 */
2848 		*do_csum = false;
2849 		return 0;
2850 	}
2851 	*do_csum = true;
2852 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2853 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2854 
2855 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2856 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2857 
2858 	if ((m0->m_pkthdr.csum_flags &
2859 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2860 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2861 	} else {
2862 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2863 	}
2864 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2865 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2866 
2867 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2868 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2869 		     << NQTXC_VLLEN_VLAN_SHIFT);
2870 		*cmdlenp |= NQTX_CMD_VLE;
2871 	}
2872 
2873 	mssidx = 0;
2874 
2875 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2876 		int hlen = offset + iphl;
2877 		int tcp_hlen;
2878 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2879 
2880 		if (__predict_false(m0->m_len <
2881 				    (hlen + sizeof(struct tcphdr)))) {
2882 			/*
2883 			 * TCP/IP headers are not in the first mbuf; we need
2884 			 * to do this the slow and painful way.  Let's just
2885 			 * hope this doesn't happen very often.
2886 			 */
2887 			struct tcphdr th;
2888 
2889 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2890 
2891 			m_copydata(m0, hlen, sizeof(th), &th);
2892 			if (v4) {
2893 				struct ip ip;
2894 
2895 				m_copydata(m0, offset, sizeof(ip), &ip);
2896 				ip.ip_len = 0;
2897 				m_copyback(m0,
2898 				    offset + offsetof(struct ip, ip_len),
2899 				    sizeof(ip.ip_len), &ip.ip_len);
2900 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2901 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2902 			} else {
2903 				struct ip6_hdr ip6;
2904 
2905 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2906 				ip6.ip6_plen = 0;
2907 				m_copyback(m0,
2908 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2909 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2910 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2911 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2912 			}
2913 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2914 			    sizeof(th.th_sum), &th.th_sum);
2915 
2916 			tcp_hlen = th.th_off << 2;
2917 		} else {
2918 			/*
2919 			 * TCP/IP headers are in the first mbuf; we can do
2920 			 * this the easy way.
2921 			 */
2922 			struct tcphdr *th;
2923 
2924 			if (v4) {
2925 				struct ip *ip =
2926 				    (void *)(mtod(m0, char *) + offset);
2927 				th = (void *)(mtod(m0, char *) + hlen);
2928 
2929 				ip->ip_len = 0;
2930 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2931 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2932 			} else {
2933 				struct ip6_hdr *ip6 =
2934 				    (void *)(mtod(m0, char *) + offset);
2935 				th = (void *)(mtod(m0, char *) + hlen);
2936 
2937 				ip6->ip6_plen = 0;
2938 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2939 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2940 			}
2941 			tcp_hlen = th->th_off << 2;
2942 		}
2943 		hlen += tcp_hlen;
2944 		*cmdlenp |= NQTX_CMD_TSE;
2945 
2946 		if (v4) {
2947 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2948 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2949 		} else {
2950 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2951 			*fieldsp |= NQTXD_FIELDS_TUXSM;
2952 		}
2953 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2954 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2955 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2956 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2957 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2958 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2959 	} else {
2960 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2961 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2962 	}
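	/*
	 * (For a plain TSOv4 TCP packet the context now carries, e.g.,
	 * MACLEN = 14 and IPLEN = 20 packed into vl_len, plus MSS = 1460
	 * and L4LEN = 20 packed into mssidx -- the advanced-descriptor
	 * analogue of the livengood IPCS/TUCS/SEG fields used by
	 * wm_tx_offload() above.)
	 */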
2963 
2964 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
2965 		*fieldsp |= NQTXD_FIELDS_IXSM;
2966 		cmdc |= NQTXC_CMD_IP4;
2967 	}
2968 
2969 	if (m0->m_pkthdr.csum_flags &
2970 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2971 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2972 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
2973 			cmdc |= NQTXC_CMD_TCP;
2974 		} else {
2975 			cmdc |= NQTXC_CMD_UDP;
2976 		}
2977 		cmdc |= NQTXC_CMD_IP4;
2978 		*fieldsp |= NQTXD_FIELDS_TUXSM;
2979 	}
2980 	if (m0->m_pkthdr.csum_flags &
2981 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2982 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2983 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
2984 			cmdc |= NQTXC_CMD_TCP;
2985 		} else {
2986 			cmdc |= NQTXC_CMD_UDP;
2987 		}
2988 		cmdc |= NQTXC_CMD_IP6;
2989 		*fieldsp |= NQTXD_FIELDS_TUXSM;
2990 	}
2991 
2992 	/* Fill in the context descriptor. */
2993 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
2994 	    htole32(vl_len);
2995 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
2996 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
2997 	    htole32(cmdc);
2998 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
2999 	    htole32(mssidx);
3000 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3001 	DPRINTF(WM_DEBUG_TX,
3002 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3003 	    sc->sc_txnext, 0, vl_len));
3004 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3005 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3006 	txs->txs_ndesc++;
3007 	return 0;
3008 }
3009 
3010 /*
3011  * wm_nq_start:		[ifnet interface function]
3012  *
3013  *	Start packet transmission on the interface for NEWQUEUE devices
3014  */
3015 static void
3016 wm_nq_start(struct ifnet *ifp)
3017 {
3018 	struct wm_softc *sc = ifp->if_softc;
3019 	struct mbuf *m0;
3020 	struct m_tag *mtag;
3021 	struct wm_txsoft *txs;
3022 	bus_dmamap_t dmamap;
3023 	int error, nexttx, lasttx = -1, seg, segs_needed;
3024 	bool do_csum, sent;
3025 
3026 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3027 		return;
3028 
3029 	sent = false;
3030 
3031 	/*
3032 	 * Loop through the send queue, setting up transmit descriptors
3033 	 * until we drain the queue, or use up all available transmit
3034 	 * descriptors.
3035 	 */
3036 	for (;;) {
3037 		/* Grab a packet off the queue. */
3038 		IFQ_POLL(&ifp->if_snd, m0);
3039 		if (m0 == NULL)
3040 			break;
3041 
3042 		DPRINTF(WM_DEBUG_TX,
3043 		    ("%s: TX: have packet to transmit: %p\n",
3044 		    device_xname(sc->sc_dev), m0));
3045 
3046 		/* Get a work queue entry. */
3047 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3048 			wm_txintr(sc);
3049 			if (sc->sc_txsfree == 0) {
3050 				DPRINTF(WM_DEBUG_TX,
3051 				    ("%s: TX: no free job descriptors\n",
3052 					device_xname(sc->sc_dev)));
3053 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3054 				break;
3055 			}
3056 		}
3057 
3058 		txs = &sc->sc_txsoft[sc->sc_txsnext];
3059 		dmamap = txs->txs_dmamap;
3060 
3061 		/*
3062 		 * Load the DMA map.  If this fails, the packet either
3063 		 * didn't fit in the allotted number of segments, or we
3064 		 * were short on resources.  For the too-many-segments
3065 		 * case, we simply report an error and drop the packet,
3066 		 * since we can't sanely copy a jumbo packet to a single
3067 		 * buffer.
3068 		 */
3069 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3070 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3071 		if (error) {
3072 			if (error == EFBIG) {
3073 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3074 				log(LOG_ERR, "%s: Tx packet consumes too many "
3075 				    "DMA segments, dropping...\n",
3076 				    device_xname(sc->sc_dev));
3077 				IFQ_DEQUEUE(&ifp->if_snd, m0);
3078 				wm_dump_mbuf_chain(sc, m0);
3079 				m_freem(m0);
3080 				continue;
3081 			}
3082 			/*
3083 			 * Short on resources, just stop for now.
3084 			 */
3085 			DPRINTF(WM_DEBUG_TX,
3086 			    ("%s: TX: dmamap load failed: %d\n",
3087 			    device_xname(sc->sc_dev), error));
3088 			break;
3089 		}
3090 
3091 		segs_needed = dmamap->dm_nsegs;
3092 
3093 		/*
3094 		 * Ensure we have enough descriptors free to describe
3095 		 * the packet.  Note, we always reserve one descriptor
3096 		 * at the end of the ring due to the semantics of the
3097 		 * TDT register, plus one more in the event we need
3098 		 * to load offload context.
3099 		 */
3100 		if (segs_needed > sc->sc_txfree - 2) {
3101 			/*
3102 			 * Not enough free descriptors to transmit this
3103 			 * packet.  We haven't committed anything yet,
3104 			 * so just unload the DMA map, put the packet
3105 			 * back on the queue, and punt.  Notify the upper
3106 			 * layer that there are no more slots left.
3107 			 */
3108 			DPRINTF(WM_DEBUG_TX,
3109 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
3110 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
3111 			    segs_needed, sc->sc_txfree - 1));
3112 			ifp->if_flags |= IFF_OACTIVE;
3113 			bus_dmamap_unload(sc->sc_dmat, dmamap);
3114 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3115 			break;
3116 		}
3117 
3118 		IFQ_DEQUEUE(&ifp->if_snd, m0);
3119 
3120 		/*
3121 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3122 		 */
3123 
3124 		DPRINTF(WM_DEBUG_TX,
3125 		    ("%s: TX: packet has %d (%d) DMA segments\n",
3126 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3127 
3128 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3129 
3130 		/*
3131 		 * Store a pointer to the packet so that we can free it
3132 		 * later.
3133 		 *
3134 		 * Initially, we consider the number of descriptors the
3135 		 * packet uses to be the number of DMA segments.  This may be
3136 		 * incremented by 1 if we do checksum offload (a descriptor
3137 		 * is used to set the checksum context).
3138 		 */
3139 		txs->txs_mbuf = m0;
3140 		txs->txs_firstdesc = sc->sc_txnext;
3141 		txs->txs_ndesc = segs_needed;
3142 
3143 		/* Set up offload parameters for this packet. */
3144 		uint32_t cmdlen, fields, dcmdlen;
3145 		if (m0->m_pkthdr.csum_flags &
3146 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
3147 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3148 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3149 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3150 			    &do_csum) != 0) {
3151 				/* Error message already displayed. */
3152 				bus_dmamap_unload(sc->sc_dmat, dmamap);
3153 				continue;
3154 			}
3155 		} else {
3156 			do_csum = false;
3157 			cmdlen = 0;
3158 			fields = 0;
3159 		}
3160 
3161 		/* Sync the DMA map. */
3162 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3163 		    BUS_DMASYNC_PREWRITE);
3164 
3165 		/*
3166 		 * Initialize the first transmit descriptor.
3167 		 */
3168 		nexttx = sc->sc_txnext;
3169 		if (!do_csum) {
3170 			/* setup a legacy descriptor */
3171 			wm_set_dma_addr(
3172 			    &sc->sc_txdescs[nexttx].wtx_addr,
3173 			    dmamap->dm_segs[0].ds_addr);
3174 			sc->sc_txdescs[nexttx].wtx_cmdlen =
3175 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3176 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3177 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3178 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3179 			    NULL) {
3180 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
3181 				    htole32(WTX_CMD_VLE);
3182 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3183 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3184 			} else {
3185 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3186 			}
3187 			dcmdlen = 0;
3188 		} else {
3189 			/* setup an advanced data descriptor */
3190 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3191 			    htole64(dmamap->dm_segs[0].ds_addr);
3192 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3193 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3194 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
3195 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3196 			    htole32(fields);
3197 			DPRINTF(WM_DEBUG_TX,
3198 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3199 			    device_xname(sc->sc_dev), nexttx,
3200 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
3201 			DPRINTF(WM_DEBUG_TX,
3202 			    ("\t 0x%08x%08x\n", fields,
3203 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3204 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3205 		}
3206 
3207 		lasttx = nexttx;
3208 		nexttx = WM_NEXTTX(sc, nexttx);
3209 		/*
3210 		 * Fill in the next descriptors.  The legacy and advanced
3211 		 * formats are the same here.
3212 		 */
3213 		for (seg = 1; seg < dmamap->dm_nsegs;
3214 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3215 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3216 			    htole64(dmamap->dm_segs[seg].ds_addr);
3217 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3218 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3219 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3220 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3221 			lasttx = nexttx;
3222 
3223 			DPRINTF(WM_DEBUG_TX,
3224 			    ("%s: TX: desc %d: %#" PRIx64 ", "
3225 			     "len %#04zx\n",
3226 			    device_xname(sc->sc_dev), nexttx,
3227 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
3228 			    dmamap->dm_segs[seg].ds_len));
3229 		}
3230 
3231 		KASSERT(lasttx != -1);
3232 
3233 		/*
3234 		 * Set up the command byte on the last descriptor of
3235 		 * the packet.  If we're in the interrupt delay window,
3236 		 * delay the interrupt.
3237 		 */
3238 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3239 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
3240 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
3241 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
3242 
3243 		txs->txs_lastdesc = lasttx;
3244 
3245 		DPRINTF(WM_DEBUG_TX,
3246 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
3247 		    device_xname(sc->sc_dev),
3248 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3249 
3250 		/* Sync the descriptors we're using. */
3251 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3252 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3253 
3254 		/* Give the packet to the chip. */
3255 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3256 		sent = true;
3257 
3258 		DPRINTF(WM_DEBUG_TX,
3259 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3260 
3261 		DPRINTF(WM_DEBUG_TX,
3262 		    ("%s: TX: finished transmitting packet, job %d\n",
3263 		    device_xname(sc->sc_dev), sc->sc_txsnext));
3264 
3265 		/* Advance the tx pointer. */
3266 		sc->sc_txfree -= txs->txs_ndesc;
3267 		sc->sc_txnext = nexttx;
3268 
3269 		sc->sc_txsfree--;
3270 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3271 
3272 		/* Pass the packet to any BPF listeners. */
3273 		bpf_mtap(ifp, m0);
3274 	}
3275 
3276 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3277 		/* No more slots; notify upper layer. */
3278 		ifp->if_flags |= IFF_OACTIVE;
3279 	}
3280 
3281 	if (sent) {
3282 		/* Set a watchdog timer in case the chip flakes out. */
3283 		ifp->if_timer = 5;
3284 	}
3285 }
3286 
3287 /*
3288  * wm_watchdog:		[ifnet interface function]
3289  *
3290  *	Watchdog timer handler.
3291  */
3292 static void
3293 wm_watchdog(struct ifnet *ifp)
3294 {
3295 	struct wm_softc *sc = ifp->if_softc;
3296 
3297 	/*
3298 	 * Since we're using delayed interrupts, sweep up
3299 	 * before we report an error.
3300 	 */
3301 	wm_txintr(sc);
3302 
3303 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
3304 #ifdef WM_DEBUG
3305 		int i, j;
3306 		struct wm_txsoft *txs;
3307 #endif
3308 		log(LOG_ERR,
3309 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3310 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3311 		    sc->sc_txnext);
3312 		ifp->if_oerrors++;
3313 #ifdef WM_DEBUG
3314 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext;
3315 		    i = WM_NEXTTXS(sc, i)) {
3316 			txs = &sc->sc_txsoft[i];
3317 			printf("txs %d tx %d -> %d\n",
3318 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
3319 			for (j = txs->txs_firstdesc; ;
3320 			    j = WM_NEXTTX(sc, j)) {
3321 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3322 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3323 				printf("\t %#08x%08x\n",
3324 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3325 				    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3326 				if (j == txs->txs_lastdesc)
3327 					break;
3328 			}
3329 		}
3330 #endif
3331 		/* Reset the interface. */
3332 		(void) wm_init(ifp);
3333 	}
3334 
3335 	/* Try to get more packets going. */
3336 	ifp->if_start(ifp);
3337 }
3338 
3339 static int
3340 wm_ifflags_cb(struct ethercom *ec)
3341 {
3342 	struct ifnet *ifp = &ec->ec_if;
3343 	struct wm_softc *sc = ifp->if_softc;
3344 	int change = ifp->if_flags ^ sc->sc_if_flags;
3345 
3346 	if (change != 0)
3347 		sc->sc_if_flags = ifp->if_flags;
3348 
3349 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3350 		return ENETRESET;
3351 
3352 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3353 		wm_set_filter(sc);
3354 
3355 	wm_set_vlan(sc);
3356 
3357 	return 0;
3358 }
3359 
3360 /*
3361  * wm_ioctl:		[ifnet interface function]
3362  *
3363  *	Handle control requests from the operator.
3364  */
3365 static int
3366 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3367 {
3368 	struct wm_softc *sc = ifp->if_softc;
3369 	struct ifreq *ifr = (struct ifreq *) data;
3370 	struct ifaddr *ifa = (struct ifaddr *)data;
3371 	struct sockaddr_dl *sdl;
3372 	int s, error;
3373 
3374 	s = splnet();
3375 
3376 	switch (cmd) {
3377 	case SIOCSIFMEDIA:
3378 	case SIOCGIFMEDIA:
3379 		/* Flow control requires full-duplex mode. */
3380 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3381 		    (ifr->ifr_media & IFM_FDX) == 0)
3382 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3383 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3384 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3385 				/* We can do both TXPAUSE and RXPAUSE. */
3386 				ifr->ifr_media |=
3387 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3388 			}
3389 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3390 		}
3391 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3392 		break;
3393 	case SIOCINITIFADDR:
3394 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3395 			sdl = satosdl(ifp->if_dl->ifa_addr);
3396 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3397 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3398 			/* unicast address is first multicast entry */
3399 			wm_set_filter(sc);
3400 			error = 0;
3401 			break;
3402 		}
3403 		/*FALLTHROUGH*/
3404 	default:
3405 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3406 			break;
3407 
3408 		error = 0;
3409 
3410 		if (cmd == SIOCSIFCAP)
3411 			error = (*ifp->if_init)(ifp);
3412 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3413 			;
3414 		else if (ifp->if_flags & IFF_RUNNING) {
3415 			/*
3416 			 * Multicast list has changed; set the hardware filter
3417 			 * accordingly.
3418 			 */
3419 			wm_set_filter(sc);
3420 		}
3421 		break;
3422 	}
3423 
3424 	/* Try to get more packets going. */
3425 	ifp->if_start(ifp);
3426 
3427 	splx(s);
3428 	return error;
3429 }
3430 
3431 /*
3432  * wm_intr:
3433  *
3434  *	Interrupt service routine.
3435  */
3436 static int
3437 wm_intr(void *arg)
3438 {
3439 	struct wm_softc *sc = arg;
3440 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3441 	uint32_t icr;
3442 	int handled = 0;
3443 
3444 	while (1 /* CONSTCOND */) {
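		/*
		 * Note: reading ICR acknowledges (clears) the asserted
		 * interrupt causes, so each pass handles one snapshot.
		 */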
3445 		icr = CSR_READ(sc, WMREG_ICR);
3446 		if ((icr & sc->sc_icr) == 0)
3447 			break;
3448 		rnd_add_uint32(&sc->rnd_source, icr);
3449 
3450 		handled = 1;
3451 
3452 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3453 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3454 			DPRINTF(WM_DEBUG_RX,
3455 			    ("%s: RX: got Rx intr 0x%08x\n",
3456 			    device_xname(sc->sc_dev),
3457 			    icr & (ICR_RXDMT0|ICR_RXT0)));
3458 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3459 		}
3460 #endif
3461 		wm_rxintr(sc);
3462 
3463 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3464 		if (icr & ICR_TXDW) {
3465 			DPRINTF(WM_DEBUG_TX,
3466 			    ("%s: TX: got TXDW interrupt\n",
3467 			    device_xname(sc->sc_dev)));
3468 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
3469 		}
3470 #endif
3471 		wm_txintr(sc);
3472 
3473 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3474 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3475 			wm_linkintr(sc, icr);
3476 		}
3477 
3478 		if (icr & ICR_RXO) {
3479 #if defined(WM_DEBUG)
3480 			log(LOG_WARNING, "%s: Receive overrun\n",
3481 			    device_xname(sc->sc_dev));
3482 #endif /* defined(WM_DEBUG) */
3483 		}
3484 	}
3485 
3486 	if (handled) {
3487 		/* Try to get more packets going. */
3488 		ifp->if_start(ifp);
3489 	}
3490 
3491 	return handled;
3492 }
3493 
3494 /*
3495  * wm_txintr:
3496  *
3497  *	Helper; handle transmit interrupts.
3498  */
3499 static void
3500 wm_txintr(struct wm_softc *sc)
3501 {
3502 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3503 	struct wm_txsoft *txs;
3504 	uint8_t status;
3505 	int i;
3506 
3507 	ifp->if_flags &= ~IFF_OACTIVE;
3508 
3509 	/*
3510 	 * Go through the Tx list and free mbufs for those
3511 	 * frames which have been transmitted.
3512 	 */
3513 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3514 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3515 		txs = &sc->sc_txsoft[i];
3516 
3517 		DPRINTF(WM_DEBUG_TX,
3518 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3519 
3520 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3521 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3522 
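		/*
		 * Check the descriptor-done bit on the job's last
		 * descriptor; if it is still clear the chip owns the job,
		 * so re-arm the sync and stop sweeping.
		 */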
3523 		status =
3524 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3525 		if ((status & WTX_ST_DD) == 0) {
3526 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3527 			    BUS_DMASYNC_PREREAD);
3528 			break;
3529 		}
3530 
3531 		DPRINTF(WM_DEBUG_TX,
3532 		    ("%s: TX: job %d done: descs %d..%d\n",
3533 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3534 		    txs->txs_lastdesc));
3535 
3536 		/*
3537 		 * XXX We should probably be using the statistics
3538 		 * XXX registers, but I don't know if they exist
3539 		 * XXX on chips before the i82544.
3540 		 */
3541 
3542 #ifdef WM_EVENT_COUNTERS
3543 		if (status & WTX_ST_TU)
3544 			WM_EVCNT_INCR(&sc->sc_ev_tu);
3545 #endif /* WM_EVENT_COUNTERS */
3546 
3547 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
3548 			ifp->if_oerrors++;
3549 			if (status & WTX_ST_LC)
3550 				log(LOG_WARNING, "%s: late collision\n",
3551 				    device_xname(sc->sc_dev));
3552 			else if (status & WTX_ST_EC) {
3553 				ifp->if_collisions += 16;
3554 				log(LOG_WARNING, "%s: excessive collisions\n",
3555 				    device_xname(sc->sc_dev));
3556 			}
3557 		} else
3558 			ifp->if_opackets++;
3559 
3560 		sc->sc_txfree += txs->txs_ndesc;
3561 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3562 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3563 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3564 		m_freem(txs->txs_mbuf);
3565 		txs->txs_mbuf = NULL;
3566 	}
3567 
3568 	/* Update the dirty transmit buffer pointer. */
3569 	sc->sc_txsdirty = i;
3570 	DPRINTF(WM_DEBUG_TX,
3571 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3572 
3573 	/*
3574 	 * If there are no more pending transmissions, cancel the watchdog
3575 	 * timer.
3576 	 */
3577 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3578 		ifp->if_timer = 0;
3579 }
3580 
3581 /*
3582  * wm_rxintr:
3583  *
3584  *	Helper; handle receive interrupts.
3585  */
3586 static void
3587 wm_rxintr(struct wm_softc *sc)
3588 {
3589 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3590 	struct wm_rxsoft *rxs;
3591 	struct mbuf *m;
3592 	int i, len;
3593 	uint8_t status, errors;
3594 	uint16_t vlantag;
3595 
3596 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3597 		rxs = &sc->sc_rxsoft[i];
3598 
3599 		DPRINTF(WM_DEBUG_RX,
3600 		    ("%s: RX: checking descriptor %d\n",
3601 		    device_xname(sc->sc_dev), i));
3602 
3603 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3604 
3605 		status = sc->sc_rxdescs[i].wrx_status;
3606 		errors = sc->sc_rxdescs[i].wrx_errors;
3607 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3608 		vlantag = sc->sc_rxdescs[i].wrx_special;
3609 
3610 		if ((status & WRX_ST_DD) == 0) {
3611 			/*
3612 			 * We have processed all of the receive descriptors.
3613 			 */
3614 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3615 			break;
3616 		}
3617 
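		/*
		 * sc_rxdiscard is set further down when a buffer in the
		 * middle of a packet cannot be replaced; keep dropping
		 * descriptors until the EOP of that packet clears it.
		 */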
3618 		if (__predict_false(sc->sc_rxdiscard)) {
3619 			DPRINTF(WM_DEBUG_RX,
3620 			    ("%s: RX: discarding contents of descriptor %d\n",
3621 			    device_xname(sc->sc_dev), i));
3622 			WM_INIT_RXDESC(sc, i);
3623 			if (status & WRX_ST_EOP) {
3624 				/* Reset our state. */
3625 				DPRINTF(WM_DEBUG_RX,
3626 				    ("%s: RX: resetting rxdiscard -> 0\n",
3627 				    device_xname(sc->sc_dev)));
3628 				sc->sc_rxdiscard = 0;
3629 			}
3630 			continue;
3631 		}
3632 
3633 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3634 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3635 
3636 		m = rxs->rxs_mbuf;
3637 
3638 		/*
3639 		 * Add a new receive buffer to the ring, unless of
3640 		 * course the length is zero. Treat the latter as a
3641 		 * failed mapping.
3642 		 */
3643 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3644 			/*
3645 			 * Failed, throw away what we've done so
3646 			 * far, and discard the rest of the packet.
3647 			 */
3648 			ifp->if_ierrors++;
3649 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3650 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3651 			WM_INIT_RXDESC(sc, i);
3652 			if ((status & WRX_ST_EOP) == 0)
3653 				sc->sc_rxdiscard = 1;
3654 			if (sc->sc_rxhead != NULL)
3655 				m_freem(sc->sc_rxhead);
3656 			WM_RXCHAIN_RESET(sc);
3657 			DPRINTF(WM_DEBUG_RX,
3658 			    ("%s: RX: Rx buffer allocation failed, "
3659 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3660 			    sc->sc_rxdiscard ? " (discard)" : ""));
3661 			continue;
3662 		}
3663 
3664 		m->m_len = len;
3665 		sc->sc_rxlen += len;
3666 		DPRINTF(WM_DEBUG_RX,
3667 		    ("%s: RX: buffer at %p len %d\n",
3668 		    device_xname(sc->sc_dev), m->m_data, len));
3669 
3670 		/*
3671 		 * If this is not the end of the packet, keep
3672 		 * looking.
3673 		 */
3674 		if ((status & WRX_ST_EOP) == 0) {
3675 			WM_RXCHAIN_LINK(sc, m);
3676 			DPRINTF(WM_DEBUG_RX,
3677 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3678 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3679 			continue;
3680 		}
3681 
3682 		/*
3683 		 * Okay, we have the entire packet now.  The chip is
3684 		 * configured to include the FCS except on the I350 and
3685 		 * I21[01] (not all chips can be configured to strip it),
3686 		 * so we need to trim it.  We may need to adjust the
3687 		 * length of the previous mbuf in the chain if the
3688 		 * current mbuf is too short.  Due to an erratum, the
3689 		 * RCTL_SECRC bit in the RCTL register is always set on
3690 		 * the I350, so we don't trim the FCS there.
3691 		 */
3692 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I210)
3693 		    && (sc->sc_type != WM_T_I211)) {
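			/*
			 * If the final mbuf holds fewer than ETHER_CRC_LEN
			 * bytes, part of the FCS spilled into the previous
			 * mbuf, so trim the remainder from sc_rxtail.
			 */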
3694 			if (m->m_len < ETHER_CRC_LEN) {
3695 				sc->sc_rxtail->m_len
3696 				    -= (ETHER_CRC_LEN - m->m_len);
3697 				m->m_len = 0;
3698 			} else
3699 				m->m_len -= ETHER_CRC_LEN;
3700 			len = sc->sc_rxlen - ETHER_CRC_LEN;
3701 		} else
3702 			len = sc->sc_rxlen;
3703 
3704 		WM_RXCHAIN_LINK(sc, m);
3705 
3706 		*sc->sc_rxtailp = NULL;
3707 		m = sc->sc_rxhead;
3708 
3709 		WM_RXCHAIN_RESET(sc);
3710 
3711 		DPRINTF(WM_DEBUG_RX,
3712 		    ("%s: RX: have entire packet, len -> %d\n",
3713 		    device_xname(sc->sc_dev), len));
3714 
3715 		/*
3716 		 * If an error occurred, update stats and drop the packet.
3717 		 */
3718 		if (errors &
3719 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3720 			if (errors & WRX_ER_SE)
3721 				log(LOG_WARNING, "%s: symbol error\n",
3722 				    device_xname(sc->sc_dev));
3723 			else if (errors & WRX_ER_SEQ)
3724 				log(LOG_WARNING, "%s: receive sequence error\n",
3725 				    device_xname(sc->sc_dev));
3726 			else if (errors & WRX_ER_CE)
3727 				log(LOG_WARNING, "%s: CRC error\n",
3728 				    device_xname(sc->sc_dev));
3729 			m_freem(m);
3730 			continue;
3731 		}
3732 
3733 		/*
3734 		 * No errors.  Receive the packet.
3735 		 */
3736 		m->m_pkthdr.rcvif = ifp;
3737 		m->m_pkthdr.len = len;
3738 
3739 		/*
3740 		 * If VLANs are enabled, VLAN packets have been unwrapped
3741 		 * for us.  Associate the tag with the packet.
3742 		 */
3743 		if ((status & WRX_ST_VP) != 0) {
3744 			VLAN_INPUT_TAG(ifp, m,
3745 			    le16toh(vlantag),
3746 			    continue);
3747 		}
3748 
3749 		/*
3750 		 * Set up checksum info for this packet.
3751 		 */
3752 		if ((status & WRX_ST_IXSM) == 0) {
3753 			if (status & WRX_ST_IPCS) {
3754 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3755 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3756 				if (errors & WRX_ER_IPE)
3757 					m->m_pkthdr.csum_flags |=
3758 					    M_CSUM_IPv4_BAD;
3759 			}
3760 			if (status & WRX_ST_TCPCS) {
3761 				/*
3762 				 * Note: we don't know if this was TCP or UDP,
3763 				 * so we just set both bits, and expect the
3764 				 * upper layers to deal.
3765 				 */
3766 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3767 				m->m_pkthdr.csum_flags |=
3768 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3769 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3770 				if (errors & WRX_ER_TCPE)
3771 					m->m_pkthdr.csum_flags |=
3772 					    M_CSUM_TCP_UDP_BAD;
3773 			}
3774 		}
3775 
3776 		ifp->if_ipackets++;
3777 
3778 		/* Pass this up to any BPF listeners. */
3779 		bpf_mtap(ifp, m);
3780 
3781 		/* Pass it on. */
3782 		(*ifp->if_input)(ifp, m);
3783 	}
3784 
3785 	/* Update the receive pointer. */
3786 	sc->sc_rxptr = i;
3787 
3788 	DPRINTF(WM_DEBUG_RX,
3789 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3790 }
3791 
3792 /*
3793  * wm_linkintr_gmii:
3794  *
3795  *	Helper; handle link interrupts for GMII.
3796  */
3797 static void
3798 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3799 {
3800 
3801 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3802 		__func__));
3803 
3804 	if (icr & ICR_LSC) {
3805 		DPRINTF(WM_DEBUG_LINK,
3806 		    ("%s: LINK: LSC -> mii_tick\n",
3807 			device_xname(sc->sc_dev)));
3808 		mii_tick(&sc->sc_mii);
3809 		if (sc->sc_type == WM_T_82543) {
3810 			int miistatus, active;
3811 
3812 			/*
3813 			 * With 82543, we need to force speed and
3814 			 * duplex on the MAC equal to what the PHY
3815 			 * speed and duplex configuration is.
3816 			 */
3817 			miistatus = sc->sc_mii.mii_media_status;
3818 
3819 			if (miistatus & IFM_ACTIVE) {
3820 				active = sc->sc_mii.mii_media_active;
3821 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3822 				switch (IFM_SUBTYPE(active)) {
3823 				case IFM_10_T:
3824 					sc->sc_ctrl |= CTRL_SPEED_10;
3825 					break;
3826 				case IFM_100_TX:
3827 					sc->sc_ctrl |= CTRL_SPEED_100;
3828 					break;
3829 				case IFM_1000_T:
3830 					sc->sc_ctrl |= CTRL_SPEED_1000;
3831 					break;
3832 				default:
3833 					/*
3834 					 * Fiber?
3835 					 * Should not enter here.
3836 					 */
3837 					printf("unknown media (%x)\n",
3838 					    active);
3839 					break;
3840 				}
3841 				if (active & IFM_FDX)
3842 					sc->sc_ctrl |= CTRL_FD;
3843 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3844 			}
3845 		} else if ((sc->sc_type == WM_T_ICH8)
3846 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3847 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3848 		} else if (sc->sc_type == WM_T_PCH) {
3849 			wm_k1_gig_workaround_hv(sc,
3850 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3851 		}
3852 
3853 		if ((sc->sc_phytype == WMPHY_82578)
3854 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3855 			== IFM_1000_T)) {
3856 
3857 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3858 				delay(200*1000); /* XXX too big */
3859 
3860 				/* Link stall fix for link up */
3861 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3862 				    HV_MUX_DATA_CTRL,
3863 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3864 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3865 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3866 				    HV_MUX_DATA_CTRL,
3867 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3868 			}
3869 		}
3870 	} else if (icr & ICR_RXSEQ) {
3871 		DPRINTF(WM_DEBUG_LINK,
3872 		    ("%s: LINK: Receive sequence error\n",
3873 			device_xname(sc->sc_dev)));
3874 	}
3875 }
3876 
3877 /*
3878  * wm_linkintr_tbi:
3879  *
3880  *	Helper; handle link interrupts for TBI mode.
3881  */
3882 static void
3883 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3884 {
3885 	uint32_t status;
3886 
3887 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3888 		__func__));
3889 
3890 	status = CSR_READ(sc, WMREG_STATUS);
3891 	if (icr & ICR_LSC) {
3892 		if (status & STATUS_LU) {
3893 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3894 			    device_xname(sc->sc_dev),
3895 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3896 			/*
3897 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3898 			 * automatically, so we re-read CTRL into sc->sc_ctrl.
3899 			 */
3900 
3901 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3902 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3903 			sc->sc_fcrtl &= ~FCRTL_XONE;
3904 			if (status & STATUS_FD)
3905 				sc->sc_tctl |=
3906 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3907 			else
3908 				sc->sc_tctl |=
3909 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3910 			if (sc->sc_ctrl & CTRL_TFCE)
3911 				sc->sc_fcrtl |= FCRTL_XONE;
3912 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3913 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3914 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3915 				      sc->sc_fcrtl);
3916 			sc->sc_tbi_linkup = 1;
3917 		} else {
3918 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3919 			    device_xname(sc->sc_dev)));
3920 			sc->sc_tbi_linkup = 0;
3921 		}
3922 		wm_tbi_set_linkled(sc);
3923 	} else if (icr & ICR_RXCFG) {
3924 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3925 		    device_xname(sc->sc_dev)));
3926 		sc->sc_tbi_nrxcfg++;
3927 		wm_check_for_link(sc);
3928 	} else if (icr & ICR_RXSEQ) {
3929 		DPRINTF(WM_DEBUG_LINK,
3930 		    ("%s: LINK: Receive sequence error\n",
3931 		    device_xname(sc->sc_dev)));
3932 	}
3933 }
3934 
3935 /*
3936  * wm_linkintr:
3937  *
3938  *	Helper; handle link interrupts.
3939  */
3940 static void
3941 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3942 {
3943 
3944 	if (sc->sc_flags & WM_F_HAS_MII)
3945 		wm_linkintr_gmii(sc, icr);
3946 	else
3947 		wm_linkintr_tbi(sc, icr);
3948 }
3949 
3950 /*
3951  * wm_tick:
3952  *
3953  *	One second timer, used to check link status, sweep up
3954  *	completed transmit jobs, etc.
3955  */
3956 static void
3957 wm_tick(void *arg)
3958 {
3959 	struct wm_softc *sc = arg;
3960 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3961 	int s;
3962 
3963 	s = splnet();
3964 
3965 	if (sc->sc_type >= WM_T_82542_2_1) {
3966 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3967 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3968 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3969 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3970 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3971 	}
3972 
3973 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3974 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
3975 	    + CSR_READ(sc, WMREG_CRCERRS)
3976 	    + CSR_READ(sc, WMREG_ALGNERRC)
3977 	    + CSR_READ(sc, WMREG_SYMERRC)
3978 	    + CSR_READ(sc, WMREG_RXERRC)
3979 	    + CSR_READ(sc, WMREG_SEC)
3980 	    + CSR_READ(sc, WMREG_CEXTERR)
3981 	    + CSR_READ(sc, WMREG_RLEC);
3982 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
3983 
3984 	if (sc->sc_flags & WM_F_HAS_MII)
3985 		mii_tick(&sc->sc_mii);
3986 	else
3987 		wm_tbi_check_link(sc);
3988 
3989 	splx(s);
3990 
3991 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3992 }
3993 
3994 /*
3995  * wm_reset:
3996  *
3997  *	Reset the i82542 chip.
3998  */
3999 static void
4000 wm_reset(struct wm_softc *sc)
4001 {
4002 	int phy_reset = 0;
4003 	uint32_t reg, mask;
4004 	int i;
4005 
4006 	/*
4007 	 * Allocate on-chip memory according to the MTU size.
4008 	 * The Packet Buffer Allocation register must be written
4009 	 * before the chip is reset.
4010 	 */
4011 	switch (sc->sc_type) {
4012 	case WM_T_82547:
4013 	case WM_T_82547_2:
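		/*
		 * The 82547 splits its packet buffer: the low sc_pba KB
		 * goes to receive and the rest (up to PBA_40K) becomes
		 * the Tx FIFO, as computed below.
		 */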
4014 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4015 		    PBA_22K : PBA_30K;
4016 		sc->sc_txfifo_head = 0;
4017 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4018 		sc->sc_txfifo_size =
4019 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4020 		sc->sc_txfifo_stall = 0;
4021 		break;
4022 	case WM_T_82571:
4023 	case WM_T_82572:
4024 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4025 	case WM_T_I350:
4026 	case WM_T_80003:
4027 		sc->sc_pba = PBA_32K;
4028 		break;
4029 	case WM_T_82580:
4030 	case WM_T_82580ER:
4031 		sc->sc_pba = PBA_35K;
4032 		break;
4033 	case WM_T_I210:
4034 	case WM_T_I211:
4035 		sc->sc_pba = PBA_34K;
4036 		break;
4037 	case WM_T_82576:
4038 		sc->sc_pba = PBA_64K;
4039 		break;
4040 	case WM_T_82573:
4041 		sc->sc_pba = PBA_12K;
4042 		break;
4043 	case WM_T_82574:
4044 	case WM_T_82583:
4045 		sc->sc_pba = PBA_20K;
4046 		break;
4047 	case WM_T_ICH8:
4048 		sc->sc_pba = PBA_8K;
4049 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4050 		break;
4051 	case WM_T_ICH9:
4052 	case WM_T_ICH10:
4053 		sc->sc_pba = PBA_10K;
4054 		break;
4055 	case WM_T_PCH:
4056 	case WM_T_PCH2:
4057 		sc->sc_pba = PBA_26K;
4058 		break;
4059 	default:
4060 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4061 		    PBA_40K : PBA_48K;
4062 		break;
4063 	}
4064 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4065 
4066 	/* Prevent the PCI-E bus from sticking */
4067 	if (sc->sc_flags & WM_F_PCIE) {
4068 		int timeout = 800;
4069 
4070 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4071 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4072 
4073 		while (timeout--) {
4074 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4075 			    == 0)
4076 				break;
4077 			delay(100);
4078 		}
4079 	}
4080 
4081 	/* Set the PCIe completion timeout for the interface */
4082 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4083 	    || (sc->sc_type == WM_T_I350))
4084 		wm_set_pcie_completion_timeout(sc);
4085 
4086 	/* Clear interrupt */
4087 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4088 
4089 	/* Stop the transmit and receive processes. */
4090 	CSR_WRITE(sc, WMREG_RCTL, 0);
4091 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4092 	sc->sc_rctl &= ~RCTL_EN;
4093 
4094 	/* XXX set_tbi_sbp_82543() */
4095 
4096 	delay(10*1000);
4097 
4098 	/* Must acquire the MDIO ownership before MAC reset */
4099 	switch (sc->sc_type) {
4100 	case WM_T_82573:
4101 	case WM_T_82574:
4102 	case WM_T_82583:
4103 		i = 0;
4104 		reg = CSR_READ(sc, WMREG_EXTCNFCTR)
4105 		    | EXTCNFCTR_MDIO_SW_OWNERSHIP;
4106 		do {
4107 			CSR_WRITE(sc, WMREG_EXTCNFCTR,
4108 			    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
4109 			reg = CSR_READ(sc, WMREG_EXTCNFCTR);
4110 			if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
4111 				break;
4112 			reg |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
4113 			delay(2*1000);
4114 			i++;
4115 		} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
4116 		break;
4117 	default:
4118 		break;
4119 	}
4120 
4121 	/*
4122 	 * 82541 Errata 29? & 82547 Errata 28?
4123 	 * See also the description about PHY_RST bit in CTRL register
4124 	 * in 8254x_GBe_SDM.pdf.
4125 	 */
4126 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4127 		CSR_WRITE(sc, WMREG_CTRL,
4128 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4129 		delay(5000);
4130 	}
4131 
4132 	switch (sc->sc_type) {
4133 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4134 	case WM_T_82541:
4135 	case WM_T_82541_2:
4136 	case WM_T_82547:
4137 	case WM_T_82547_2:
4138 		/*
4139 		 * On some chipsets, a reset through a memory-mapped write
4140 		 * cycle can cause the chip to reset before completing the
4141 		 * write cycle.  This causes a major headache that can be
4142 		 * avoided by issuing the reset via indirect register writes
4143 		 * through I/O space.
4144 		 *
4145 		 * So, if we successfully mapped the I/O BAR at attach time,
4146 		 * use that.  Otherwise, try our luck with a memory-mapped
4147 		 * reset.
4148 		 */
4149 		if (sc->sc_flags & WM_F_IOH_VALID)
4150 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4151 		else
4152 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4153 		break;
4154 	case WM_T_82545_3:
4155 	case WM_T_82546_3:
4156 		/* Use the shadow control register on these chips. */
4157 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4158 		break;
4159 	case WM_T_80003:
4160 		mask = swfwphysem[sc->sc_funcid];
4161 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4162 		wm_get_swfw_semaphore(sc, mask);
4163 		CSR_WRITE(sc, WMREG_CTRL, reg);
4164 		wm_put_swfw_semaphore(sc, mask);
4165 		break;
4166 	case WM_T_ICH8:
4167 	case WM_T_ICH9:
4168 	case WM_T_ICH10:
4169 	case WM_T_PCH:
4170 	case WM_T_PCH2:
4171 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4172 		if (wm_check_reset_block(sc) == 0) {
4173 			/*
4174 			 * Gate automatic PHY configuration by hardware on
4175 			 * non-managed 82579
4176 			 */
4177 			if ((sc->sc_type == WM_T_PCH2)
4178 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4179 				!= 0))
4180 				wm_gate_hw_phy_config_ich8lan(sc, 1);
4181 
4182 
4183 			reg |= CTRL_PHY_RESET;
4184 			phy_reset = 1;
4185 		}
4186 		wm_get_swfwhw_semaphore(sc);
4187 		CSR_WRITE(sc, WMREG_CTRL, reg);
4188 		delay(20*1000);
4189 		wm_put_swfwhw_semaphore(sc);
4190 		break;
4191 	case WM_T_82542_2_0:
4192 	case WM_T_82542_2_1:
4193 	case WM_T_82543:
4194 	case WM_T_82540:
4195 	case WM_T_82545:
4196 	case WM_T_82546:
4197 	case WM_T_82571:
4198 	case WM_T_82572:
4199 	case WM_T_82573:
4200 	case WM_T_82574:
4201 	case WM_T_82575:
4202 	case WM_T_82576:
4203 	case WM_T_82580:
4204 	case WM_T_82580ER:
4205 	case WM_T_82583:
4206 	case WM_T_I350:
4207 	case WM_T_I210:
4208 	case WM_T_I211:
4209 	default:
4210 		/* Everything else can safely use the documented method. */
4211 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4212 		break;
4213 	}
4214 
4215 	if (phy_reset != 0)
4216 		wm_get_cfg_done(sc);
4217 
4218 	/* reload EEPROM */
4219 	switch (sc->sc_type) {
4220 	case WM_T_82542_2_0:
4221 	case WM_T_82542_2_1:
4222 	case WM_T_82543:
4223 	case WM_T_82544:
4224 		delay(10);
4225 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4226 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4227 		delay(2000);
4228 		break;
4229 	case WM_T_82540:
4230 	case WM_T_82545:
4231 	case WM_T_82545_3:
4232 	case WM_T_82546:
4233 	case WM_T_82546_3:
4234 		delay(5*1000);
4235 		/* XXX Disable HW ARPs on ASF enabled adapters */
4236 		break;
4237 	case WM_T_82541:
4238 	case WM_T_82541_2:
4239 	case WM_T_82547:
4240 	case WM_T_82547_2:
4241 		delay(20000);
4242 		/* XXX Disable HW ARPs on ASF enabled adapters */
4243 		break;
4244 	case WM_T_82571:
4245 	case WM_T_82572:
4246 	case WM_T_82573:
4247 	case WM_T_82574:
4248 	case WM_T_82583:
4249 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4250 			delay(10);
4251 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4252 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4253 		}
4254 		/* check EECD_EE_AUTORD */
4255 		wm_get_auto_rd_done(sc);
4256 		/*
4257 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4258 		 * is set.
4259 		 */
4260 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4261 		    || (sc->sc_type == WM_T_82583))
4262 			delay(25*1000);
4263 		break;
4264 	case WM_T_82575:
4265 	case WM_T_82576:
4266 	case WM_T_82580:
4267 	case WM_T_82580ER:
4268 	case WM_T_I350:
4269 	case WM_T_I210:
4270 	case WM_T_I211:
4271 	case WM_T_80003:
4272 	case WM_T_ICH8:
4273 	case WM_T_ICH9:
4274 		/* check EECD_EE_AUTORD */
4275 		wm_get_auto_rd_done(sc);
4276 		break;
4277 	case WM_T_ICH10:
4278 	case WM_T_PCH:
4279 	case WM_T_PCH2:
4280 		wm_lan_init_done(sc);
4281 		break;
4282 	default:
4283 		panic("%s: unknown type\n", __func__);
4284 	}
4285 
4286 	/* Check whether EEPROM is present or not */
4287 	switch (sc->sc_type) {
4288 	case WM_T_82575:
4289 	case WM_T_82576:
4290 #if 0 /* XXX */
4291 	case WM_T_82580:
4292 	case WM_T_82580ER:
4293 #endif
4294 	case WM_T_I350:
4295 	case WM_T_ICH8:
4296 	case WM_T_ICH9:
4297 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4298 			/* Not found */
4299 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4300 			if ((sc->sc_type == WM_T_82575)
4301 			    || (sc->sc_type == WM_T_82576)
4302 			    || (sc->sc_type == WM_T_82580)
4303 			    || (sc->sc_type == WM_T_82580ER)
4304 			    || (sc->sc_type == WM_T_I350))
4305 				wm_reset_init_script_82575(sc);
4306 		}
4307 		break;
4308 	default:
4309 		break;
4310 	}
4311 
4312 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4313 	    || (sc->sc_type == WM_T_I350)) {
4314 		/* clear global device reset status bit */
4315 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4316 	}
4317 
4318 	/* Clear any pending interrupt events. */
4319 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4320 	reg = CSR_READ(sc, WMREG_ICR);
4321 
4322 	/* reload sc_ctrl */
4323 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4324 
4325 	if (sc->sc_type == WM_T_I350)
4326 		wm_set_eee_i350(sc);
4327 
4328 	/* dummy read from WUC */
4329 	if (sc->sc_type == WM_T_PCH)
4330 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4331 	/*
4332 	 * For PCH, this write will make sure that any noise will be detected
4333 	 * as a CRC error and be dropped rather than show up as a bad packet
4334 	 * to the DMA engine
4335 	 */
4336 	if (sc->sc_type == WM_T_PCH)
4337 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4338 
4339 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4340 		CSR_WRITE(sc, WMREG_WUC, 0);
4341 
4342 	/* XXX need special handling for 82580 */
4343 }
4344 
4345 static void
4346 wm_set_vlan(struct wm_softc *sc)
4347 {
4348 	/* Deal with VLAN enables. */
4349 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4350 		sc->sc_ctrl |= CTRL_VME;
4351 	else
4352 		sc->sc_ctrl &= ~CTRL_VME;
4353 
4354 	/* Write the control registers. */
4355 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4356 }
4357 
4358 /*
4359  * wm_init:		[ifnet interface function]
4360  *
4361  *	Initialize the interface.  Must be called at splnet().
4362  */
4363 static int
4364 wm_init(struct ifnet *ifp)
4365 {
4366 	struct wm_softc *sc = ifp->if_softc;
4367 	struct wm_rxsoft *rxs;
4368 	int i, j, trynum, error = 0;
4369 	uint32_t reg;
4370 
4371 	/*
4372 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4373 	 * There is a small but measurable benefit to avoiding the adjustment
4374 	 * of the descriptor so that the headers are aligned, for normal mtu,
4375 	 * on such platforms.  One possibility is that the DMA itself is
4376 	 * slightly more efficient if the front of the entire packet (instead
4377 	 * of the front of the headers) is aligned.
4378 	 *
4379 	 * Note we must always set align_tweak to 0 if we are using
4380 	 * jumbo frames.
4381 	 */
4382 #ifdef __NO_STRICT_ALIGNMENT
4383 	sc->sc_align_tweak = 0;
4384 #else
4385 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4386 		sc->sc_align_tweak = 0;
4387 	else
4388 		sc->sc_align_tweak = 2;
4389 #endif /* __NO_STRICT_ALIGNMENT */
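	/*
	 * Sketch of the effect: with align_tweak = 2 the buffer starts
	 * 2 bytes into the cluster, so the 14-byte Ethernet header ends
	 * on a 4-byte boundary and the IP header is naturally aligned
	 * on strict-alignment machines.
	 */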
4390 
4391 	/* Cancel any pending I/O. */
4392 	wm_stop(ifp, 0);
4393 
4394 	/* update statistics before reset */
4395 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4396 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4397 
4398 	/* Reset the chip to a known state. */
4399 	wm_reset(sc);
4400 
4401 	switch (sc->sc_type) {
4402 	case WM_T_82571:
4403 	case WM_T_82572:
4404 	case WM_T_82573:
4405 	case WM_T_82574:
4406 	case WM_T_82583:
4407 	case WM_T_80003:
4408 	case WM_T_ICH8:
4409 	case WM_T_ICH9:
4410 	case WM_T_ICH10:
4411 	case WM_T_PCH:
4412 	case WM_T_PCH2:
4413 		if (wm_check_mng_mode(sc) != 0)
4414 			wm_get_hw_control(sc);
4415 		break;
4416 	default:
4417 		break;
4418 	}
4419 
4420 	/* Reset the PHY. */
4421 	if (sc->sc_flags & WM_F_HAS_MII)
4422 		wm_gmii_reset(sc);
4423 
4424 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
4425 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4426 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
4427 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4428 
4429 	/* Initialize the transmit descriptor ring. */
4430 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4431 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4432 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4433 	sc->sc_txfree = WM_NTXDESC(sc);
4434 	sc->sc_txnext = 0;
4435 
4436 	if (sc->sc_type < WM_T_82543) {
4437 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4438 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4439 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4440 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4441 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4442 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4443 	} else {
4444 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4445 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4446 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4447 		CSR_WRITE(sc, WMREG_TDH, 0);
4448 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
4449 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
4450 
4451 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4452 			/*
4453 			 * Don't write TDT before TCTL.EN is set.
4454 			 * See the documentation.
4455 			 */
4456 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4457 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4458 			    | TXDCTL_WTHRESH(0));
4459 		else {
4460 			CSR_WRITE(sc, WMREG_TDT, 0);
4461 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4462 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4463 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4464 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4465 		}
4466 	}
4467 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4468 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4469 
4470 	/* Initialize the transmit job descriptors. */
4471 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4472 		sc->sc_txsoft[i].txs_mbuf = NULL;
4473 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
4474 	sc->sc_txsnext = 0;
4475 	sc->sc_txsdirty = 0;
4476 
4477 	/*
4478 	 * Initialize the receive descriptor and receive job
4479 	 * descriptor rings.
4480 	 */
4481 	if (sc->sc_type < WM_T_82543) {
4482 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4483 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4484 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4485 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4486 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4487 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4488 
4489 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4490 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4491 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4492 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4493 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4494 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4495 	} else {
4496 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4497 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4498 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4499 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4500 			CSR_WRITE(sc, WMREG_EITR(0), 450);
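			/*
			 * SRRCTL expresses the Rx buffer size in units of
			 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes, so MCLBYTES
			 * must be a whole multiple of that unit.
			 */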
4501 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4502 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4503 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4504 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4505 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4506 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4507 			    | RXDCTL_WTHRESH(1));
4508 		} else {
4509 			CSR_WRITE(sc, WMREG_RDH, 0);
4510 			CSR_WRITE(sc, WMREG_RDT, 0);
4511 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4512 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
4513 		}
4514 	}
4515 	for (i = 0; i < WM_NRXDESC; i++) {
4516 		rxs = &sc->sc_rxsoft[i];
4517 		if (rxs->rxs_mbuf == NULL) {
4518 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4519 				log(LOG_ERR, "%s: unable to allocate or map "
4520 				    "rx buffer %d, error = %d\n",
4521 				    device_xname(sc->sc_dev), i, error);
4522 				/*
4523 				 * XXX Should attempt to run with fewer receive
4524 				 * XXX buffers instead of just failing.
4525 				 */
4526 				wm_rxdrain(sc);
4527 				goto out;
4528 			}
4529 		} else {
4530 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4531 				WM_INIT_RXDESC(sc, i);
4532 			/*
4533 			 * For 82575 and newer devices, the RX descriptors
4534 			 * must be initialized after the setting of RCTL.EN in
4535 			 * wm_set_filter()
4536 			 */
4537 		}
4538 	}
4539 	sc->sc_rxptr = 0;
4540 	sc->sc_rxdiscard = 0;
4541 	WM_RXCHAIN_RESET(sc);
4542 
4543 	/*
4544 	 * Clear out the VLAN table -- we don't use it (yet).
4545 	 */
4546 	CSR_WRITE(sc, WMREG_VET, 0);
4547 	if (sc->sc_type == WM_T_I350)
4548 		trynum = 10; /* Due to hw errata */
4549 	else
4550 		trynum = 1;
4551 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4552 		for (j = 0; j < trynum; j++)
4553 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4554 
4555 	/*
4556 	 * Set up flow-control parameters.
4557 	 *
4558 	 * XXX Values could probably stand some tuning.
4559 	 */
4560 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4561 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4562 	    && (sc->sc_type != WM_T_PCH2)) {
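		/*
		 * FCAL/FCAH/FCT identify 802.3x PAUSE frames to the
		 * receiver: the reserved multicast address
		 * 01:80:c2:00:00:01 and the MAC control ethertype 0x8808.
		 */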
4563 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4564 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4565 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4566 	}
4567 
4568 	sc->sc_fcrtl = FCRTL_DFLT;
4569 	if (sc->sc_type < WM_T_82543) {
4570 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4571 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4572 	} else {
4573 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4574 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4575 	}
4576 
4577 	if (sc->sc_type == WM_T_80003)
4578 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4579 	else
4580 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4581 
4582 	/* Writes the control register. */
4583 	wm_set_vlan(sc);
4584 
4585 	if (sc->sc_flags & WM_F_HAS_MII) {
4586 		int val;
4587 
4588 		switch (sc->sc_type) {
4589 		case WM_T_80003:
4590 		case WM_T_ICH8:
4591 		case WM_T_ICH9:
4592 		case WM_T_ICH10:
4593 		case WM_T_PCH:
4594 		case WM_T_PCH2:
4595 			/*
4596 			 * Set the MAC to wait the maximum time between each
4597 			 * iteration and to increase the max iterations when
4598 			 * polling the PHY; this fixes erroneous timeouts at
4599 			 * 10Mbps.
4600 			 */
4601 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4602 			    0xFFFF);
4603 			val = wm_kmrn_readreg(sc,
4604 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4605 			val |= 0x3F;
4606 			wm_kmrn_writereg(sc,
4607 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4608 			break;
4609 		default:
4610 			break;
4611 		}
4612 
4613 		if (sc->sc_type == WM_T_80003) {
4614 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4615 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4616 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4617 
4618 			/* Bypass the RX and TX FIFOs */
4619 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4620 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4621 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4622 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4623 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4624 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4625 		}
4626 	}
4627 #if 0
4628 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4629 #endif
4630 
4631 	/*
4632 	 * Set up checksum offload parameters.
4633 	 */
4634 	reg = CSR_READ(sc, WMREG_RXCSUM);
4635 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4636 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4637 		reg |= RXCSUM_IPOFL;
4638 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4639 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4640 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4641 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4642 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4643 
4644 	/* Reset TBI's RXCFG count */
4645 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4646 
4647 	/*
4648 	 * Set up the interrupt registers.
4649 	 */
4650 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4651 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4652 	    ICR_RXO | ICR_RXT0;
4653 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4654 		sc->sc_icr |= ICR_RXCFG;
4655 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4656 
4657 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4658 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4659 		 || (sc->sc_type == WM_T_PCH2)) {
4660 		reg = CSR_READ(sc, WMREG_KABGTXD);
4661 		reg |= KABGTXD_BGSQLBIAS;
4662 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4663 	}
4664 
4665 	/* Set up the inter-packet gap. */
4666 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4667 
4668 	if (sc->sc_type >= WM_T_82543) {
4669 		/*
4670 		 * Set up the interrupt throttling register (units of 256ns)
4671 		 * Note that a footnote in Intel's documentation says this
4672 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4673 		 * or 10Mbit mode.  Empirically, it appears to be the case
4674 		 * that that is also true for the 1024ns units of the other
4675 		 * interrupt-related timer registers -- so, really, we ought
4676 		 * to divide this value by 4 when the link speed is low.
4677 		 *
4678 		 * XXX implement this division at link speed change!
4679 		 */
4680 
4681 		 /*
4682 		  * For N interrupts/sec, set this value to:
4683 		  * 1000000000 / (N * 256).  Note that we set the
4684 		  * absolute and packet timer values to this value
4685 		  * divided by 4 to get "simple timer" behavior.
4686 		  */
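		 /*
		  * Worked example: sc_itr = 1500 gives
		  * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec,
		  * matching the annotation below.
		  */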
4687 
4688 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4689 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4690 	}
4691 
4692 	/* Set the VLAN ethernetype. */
4693 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4694 
4695 	/*
4696 	 * Set up the transmit control register; we start out with
4697 	 * a collision distance suitable for FDX, but update it when
4698 	 * we resolve the media type.
4699 	 */
4700 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4701 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4702 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4703 	if (sc->sc_type >= WM_T_82571)
4704 		sc->sc_tctl |= TCTL_MULR;
4705 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4706 
4707 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4708 		/*
4709 		 * Write TDT after TCTL.EN is set.
4710 		 * See the documentation.
4711 		 */
4712 		CSR_WRITE(sc, WMREG_TDT, 0);
4713 	}
4714 
4715 	if (sc->sc_type == WM_T_80003) {
4716 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4717 		reg &= ~TCTL_EXT_GCEX_MASK;
4718 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4719 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4720 	}
4721 
4722 	/* Set the media. */
4723 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4724 		goto out;
4725 
4726 	/* Configure for OS presence */
4727 	wm_init_manageability(sc);
4728 
4729 	/*
4730 	 * Set up the receive control register; we actually program
4731 	 * the register when we set the receive filter.  Use multicast
4732 	 * address offset type 0.
4733 	 *
4734 	 * Only the i82544 has the ability to strip the incoming
4735 	 * CRC, so we don't enable that feature.
4736 	 */
4737 	sc->sc_mchash_type = 0;
4738 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4739 	    | RCTL_MO(sc->sc_mchash_type);
4740 
4741 	/*
4742 	 * The I350 has a bug where it always strips the CRC whether
4743 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
4744 	 */
4745 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210))
4746 		sc->sc_rctl |= RCTL_SECRC;
4747 
4748 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4749 	    && (ifp->if_mtu > ETHERMTU)) {
4750 		sc->sc_rctl |= RCTL_LPE;
4751 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4752 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4753 	}
4754 
4755 	if (MCLBYTES == 2048) {
4756 		sc->sc_rctl |= RCTL_2k;
4757 	} else {
4758 		if (sc->sc_type >= WM_T_82543) {
4759 			switch (MCLBYTES) {
4760 			case 4096:
4761 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4762 				break;
4763 			case 8192:
4764 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4765 				break;
4766 			case 16384:
4767 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4768 				break;
4769 			default:
4770 				panic("wm_init: MCLBYTES %d unsupported",
4771 				    MCLBYTES);
4772 				break;
4773 			}
4774 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4775 	}
4776 
4777 	/* Set the receive filter. */
4778 	wm_set_filter(sc);
4779 
4780 	/* On 575 and later set RDT only if RX enabled */
4781 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4782 		for (i = 0; i < WM_NRXDESC; i++)
4783 			WM_INIT_RXDESC(sc, i);
4784 
4785 	/* Start the one second link check clock. */
4786 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4787 
4788 	/* ...all done! */
4789 	ifp->if_flags |= IFF_RUNNING;
4790 	ifp->if_flags &= ~IFF_OACTIVE;
4791 
4792  out:
4793 	sc->sc_if_flags = ifp->if_flags;
4794 	if (error)
4795 		log(LOG_ERR, "%s: interface not running\n",
4796 		    device_xname(sc->sc_dev));
4797 	return error;
4798 }
4799 
4800 /*
4801  * wm_rxdrain:
4802  *
4803  *	Drain the receive queue.
4804  */
4805 static void
4806 wm_rxdrain(struct wm_softc *sc)
4807 {
4808 	struct wm_rxsoft *rxs;
4809 	int i;
4810 
4811 	for (i = 0; i < WM_NRXDESC; i++) {
4812 		rxs = &sc->sc_rxsoft[i];
4813 		if (rxs->rxs_mbuf != NULL) {
4814 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4815 			m_freem(rxs->rxs_mbuf);
4816 			rxs->rxs_mbuf = NULL;
4817 		}
4818 	}
4819 }
4820 
4821 /*
4822  * wm_stop:		[ifnet interface function]
4823  *
4824  *	Stop transmission on the interface.
4825  */
4826 static void
4827 wm_stop(struct ifnet *ifp, int disable)
4828 {
4829 	struct wm_softc *sc = ifp->if_softc;
4830 	struct wm_txsoft *txs;
4831 	int i;
4832 
4833 	/* Stop the one second clock. */
4834 	callout_stop(&sc->sc_tick_ch);
4835 
4836 	/* Stop the 82547 Tx FIFO stall check timer. */
4837 	if (sc->sc_type == WM_T_82547)
4838 		callout_stop(&sc->sc_txfifo_ch);
4839 
4840 	if (sc->sc_flags & WM_F_HAS_MII) {
4841 		/* Down the MII. */
4842 		mii_down(&sc->sc_mii);
4843 	} else {
4844 #if 0
4845 		/* Should we clear PHY's status properly? */
4846 		wm_reset(sc);
4847 #endif
4848 	}
4849 
4850 	/* Stop the transmit and receive processes. */
4851 	CSR_WRITE(sc, WMREG_TCTL, 0);
4852 	CSR_WRITE(sc, WMREG_RCTL, 0);
4853 	sc->sc_rctl &= ~RCTL_EN;
4854 
4855 	/*
4856 	 * Clear the interrupt mask to ensure the device cannot assert its
4857 	 * interrupt line.
4858 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4859 	 * any currently pending or shared interrupt.
4860 	 */
4861 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4862 	sc->sc_icr = 0;
4863 
4864 	/* Release any queued transmit buffers. */
4865 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4866 		txs = &sc->sc_txsoft[i];
4867 		if (txs->txs_mbuf != NULL) {
4868 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4869 			m_freem(txs->txs_mbuf);
4870 			txs->txs_mbuf = NULL;
4871 		}
4872 	}
4873 
4874 	/* Mark the interface as down and cancel the watchdog timer. */
4875 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4876 	ifp->if_timer = 0;
4877 
4878 	if (disable)
4879 		wm_rxdrain(sc);
4880 
4881 #if 0 /* notyet */
4882 	if (sc->sc_type >= WM_T_82544)
4883 		CSR_WRITE(sc, WMREG_WUC, 0);
4884 #endif
4885 }
4886 
4887 void
4888 wm_get_auto_rd_done(struct wm_softc *sc)
4889 {
4890 	int i;
4891 
4892 	/* wait for eeprom to reload */
4893 	switch (sc->sc_type) {
4894 	case WM_T_82571:
4895 	case WM_T_82572:
4896 	case WM_T_82573:
4897 	case WM_T_82574:
4898 	case WM_T_82583:
4899 	case WM_T_82575:
4900 	case WM_T_82576:
4901 	case WM_T_82580:
4902 	case WM_T_82580ER:
4903 	case WM_T_I350:
4904 	case WM_T_I210:
4905 	case WM_T_I211:
4906 	case WM_T_80003:
4907 	case WM_T_ICH8:
4908 	case WM_T_ICH9:
4909 		for (i = 0; i < 10; i++) {
4910 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4911 				break;
4912 			delay(1000);
4913 		}
4914 		if (i == 10) {
4915 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4916 			    "complete\n", device_xname(sc->sc_dev));
4917 		}
4918 		break;
4919 	default:
4920 		break;
4921 	}
4922 }
4923 
4924 void
4925 wm_lan_init_done(struct wm_softc *sc)
4926 {
4927 	uint32_t reg = 0;
4928 	int i;
4929 
4930 	/* wait for eeprom to reload */
4931 	switch (sc->sc_type) {
4932 	case WM_T_ICH10:
4933 	case WM_T_PCH:
4934 	case WM_T_PCH2:
4935 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
4936 			reg = CSR_READ(sc, WMREG_STATUS);
4937 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
4938 				break;
4939 			delay(100);
4940 		}
4941 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
4942 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
4943 			    "complete\n", device_xname(sc->sc_dev), __func__);
4944 		}
4945 		break;
4946 	default:
4947 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
4948 		    __func__);
4949 		break;
4950 	}
4951 
4952 	reg &= ~STATUS_LAN_INIT_DONE;
4953 	CSR_WRITE(sc, WMREG_STATUS, reg);
4954 }
4955 
4956 void
4957 wm_get_cfg_done(struct wm_softc *sc)
4958 {
4959 	int mask;
4960 	uint32_t reg;
4961 	int i;
4962 
4963 	/* wait for eeprom to reload */
4964 	switch (sc->sc_type) {
4965 	case WM_T_82542_2_0:
4966 	case WM_T_82542_2_1:
4967 		/* null */
4968 		break;
4969 	case WM_T_82543:
4970 	case WM_T_82544:
4971 	case WM_T_82540:
4972 	case WM_T_82545:
4973 	case WM_T_82545_3:
4974 	case WM_T_82546:
4975 	case WM_T_82546_3:
4976 	case WM_T_82541:
4977 	case WM_T_82541_2:
4978 	case WM_T_82547:
4979 	case WM_T_82547_2:
4980 	case WM_T_82573:
4981 	case WM_T_82574:
4982 	case WM_T_82583:
4983 		/* generic */
4984 		delay(10*1000);
4985 		break;
4986 	case WM_T_80003:
4987 	case WM_T_82571:
4988 	case WM_T_82572:
4989 	case WM_T_82575:
4990 	case WM_T_82576:
4991 	case WM_T_82580:
4992 	case WM_T_82580ER:
4993 	case WM_T_I350:
4994 	case WM_T_I210:
4995 	case WM_T_I211:
4996 		if (sc->sc_type == WM_T_82571) {
4997 			/* Only 82571 shares port 0 */
4998 			mask = EEMNGCTL_CFGDONE_0;
4999 		} else
5000 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5001 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5002 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5003 				break;
5004 			delay(1000);
5005 		}
5006 		if (i >= WM_PHY_CFG_TIMEOUT) {
5007 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5008 				device_xname(sc->sc_dev), __func__));
5009 		}
5010 		break;
5011 	case WM_T_ICH8:
5012 	case WM_T_ICH9:
5013 	case WM_T_ICH10:
5014 	case WM_T_PCH:
5015 	case WM_T_PCH2:
5016 		if (sc->sc_type >= WM_T_PCH) {
5017 			reg = CSR_READ(sc, WMREG_STATUS);
5018 			if ((reg & STATUS_PHYRA) != 0)
5019 				CSR_WRITE(sc, WMREG_STATUS,
5020 				    reg & ~STATUS_PHYRA);
5021 		}
5022 		delay(10*1000);
5023 		break;
5024 	default:
5025 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5026 		    __func__);
5027 		break;
5028 	}
5029 }
5030 
5031 /*
5032  * wm_acquire_eeprom:
5033  *
5034  *	Perform the EEPROM handshake required on some chips.
5035  */
5036 static int
5037 wm_acquire_eeprom(struct wm_softc *sc)
5038 {
5039 	uint32_t reg;
5040 	int x;
5041 	int ret = 0;
5042 
5043 	/* Flash-type EEPROMs need no handshake; always succeed. */
5044 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5045 		return 0;
5046 
5047 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5048 		ret = wm_get_swfwhw_semaphore(sc);
5049 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5050 		/* this will also do wm_get_swsm_semaphore() if needed */
5051 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5052 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5053 		ret = wm_get_swsm_semaphore(sc);
5054 	}
5055 
5056 	if (ret) {
5057 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5058 			__func__);
5059 		return 1;
5060 	}
5061 
5062 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5063 		reg = CSR_READ(sc, WMREG_EECD);
5064 
5065 		/* Request EEPROM access. */
5066 		reg |= EECD_EE_REQ;
5067 		CSR_WRITE(sc, WMREG_EECD, reg);
5068 
5069 		/* ..and wait for it to be granted. */
5070 		for (x = 0; x < 1000; x++) {
5071 			reg = CSR_READ(sc, WMREG_EECD);
5072 			if (reg & EECD_EE_GNT)
5073 				break;
5074 			delay(5);
5075 		}
5076 		if ((reg & EECD_EE_GNT) == 0) {
5077 			aprint_error_dev(sc->sc_dev,
5078 			    "could not acquire EEPROM GNT\n");
5079 			reg &= ~EECD_EE_REQ;
5080 			CSR_WRITE(sc, WMREG_EECD, reg);
5081 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5082 				wm_put_swfwhw_semaphore(sc);
5083 			if (sc->sc_flags & WM_F_SWFW_SYNC)
5084 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5085 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5086 				wm_put_swsm_semaphore(sc);
5087 			return 1;
5088 		}
5089 	}
5090 
5091 	return 0;
5092 }
5093 
5094 /*
5095  * wm_release_eeprom:
5096  *
5097  *	Release the EEPROM mutex.
5098  */
5099 static void
5100 wm_release_eeprom(struct wm_softc *sc)
5101 {
5102 	uint32_t reg;
5103 
5104 	/* always success */
5105 	/* Flash-type EEPROMs need no handshake; nothing to release. */
5106 		return;
5107 
5108 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5109 		reg = CSR_READ(sc, WMREG_EECD);
5110 		reg &= ~EECD_EE_REQ;
5111 		CSR_WRITE(sc, WMREG_EECD, reg);
5112 	}
5113 
5114 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5115 		wm_put_swfwhw_semaphore(sc);
5116 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5117 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5118 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5119 		wm_put_swsm_semaphore(sc);
5120 }
5121 
5122 /*
5123  * wm_eeprom_sendbits:
5124  *
5125  *	Send a series of bits to the EEPROM.
5126  */
5127 static void
5128 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5129 {
5130 	uint32_t reg;
5131 	int x;
5132 
5133 	reg = CSR_READ(sc, WMREG_EECD);
5134 
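	/*
	 * Clock the bits out MSB-first: present each bit on DI, then
	 * pulse SK high and low while DI is held stable.
	 */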
5135 	for (x = nbits; x > 0; x--) {
5136 		if (bits & (1U << (x - 1)))
5137 			reg |= EECD_DI;
5138 		else
5139 			reg &= ~EECD_DI;
5140 		CSR_WRITE(sc, WMREG_EECD, reg);
5141 		delay(2);
5142 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5143 		delay(2);
5144 		CSR_WRITE(sc, WMREG_EECD, reg);
5145 		delay(2);
5146 	}
5147 }
5148 
5149 /*
5150  * wm_eeprom_recvbits:
5151  *
5152  *	Receive a series of bits from the EEPROM.
5153  */
5154 static void
5155 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5156 {
5157 	uint32_t reg, val;
5158 	int x;
5159 
5160 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5161 
5162 	val = 0;
5163 	for (x = nbits; x > 0; x--) {
5164 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5165 		delay(2);
5166 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5167 			val |= (1U << (x - 1));
5168 		CSR_WRITE(sc, WMREG_EECD, reg);
5169 		delay(2);
5170 	}
5171 	*valp = val;
5172 }
5173 
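
/*
 * Bit-bang timing sketch for one bit, both directions: data is set up
 * on DI with SK low, then SK is pulsed high and low with ~2us around
 * each edge.  The serial EEPROM samples DI on the rising edge of SK;
 * for reads, DO is sampled here while SK is high.  Bits move MSB
 * first: wm_eeprom_sendbits(sc, 0x6, 3) clocks out 1, 1, 0.
 */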
5174 /*
5175  * wm_read_eeprom_uwire:
5176  *
5177  *	Read a word from the EEPROM using the MicroWire protocol.
5178  */
5179 static int
5180 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5181 {
5182 	uint32_t reg, val;
5183 	int i;
5184 
5185 	for (i = 0; i < wordcnt; i++) {
5186 		/* Clear SK and DI. */
5187 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5188 		CSR_WRITE(sc, WMREG_EECD, reg);
5189 
5190 		/*
5191 		 * XXX: Workaround for a bug in qemu-0.12.x and prior,
5192 		 * and in Xen.
5193 		 *
5194 		 * We apply this workaround only to the 82540 because
5195 		 * qemu's e1000 emulation acts as an 82540.
5196 		 */
5197 		if (sc->sc_type == WM_T_82540) {
5198 			reg |= EECD_SK;
5199 			CSR_WRITE(sc, WMREG_EECD, reg);
5200 			reg &= ~EECD_SK;
5201 			CSR_WRITE(sc, WMREG_EECD, reg);
5202 			delay(2);
5203 		}
5204 		/* XXX: end of workaround */
5205 
5206 		/* Set CHIP SELECT. */
5207 		reg |= EECD_CS;
5208 		CSR_WRITE(sc, WMREG_EECD, reg);
5209 		delay(2);
5210 
5211 		/* Shift in the READ command. */
5212 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5213 
5214 		/* Shift in address. */
5215 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5216 
5217 		/* Shift out the data. */
5218 		wm_eeprom_recvbits(sc, &val, 16);
5219 		data[i] = val & 0xffff;
5220 
5221 		/* Clear CHIP SELECT. */
5222 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5223 		CSR_WRITE(sc, WMREG_EECD, reg);
5224 		delay(2);
5225 	}
5226 
5227 	return 0;
5228 }
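
/*
 * MicroWire framing example (a sketch; it assumes UWIRE_OPC_READ
 * encodes the start bit plus the "10" read opcode, i.e. binary 110):
 * reading word 5 from a part with 6 address bits clocks out
 *
 *	1 1 0  0 0 0 1 0 1
 *
 * on DI, after which the EEPROM shifts 16 data bits out on DO,
 * MSB first.
 */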
5229 
5230 /*
5231  * wm_spi_eeprom_ready:
5232  *
5233  *	Wait for a SPI EEPROM to be ready for commands.
5234  */
5235 static int
5236 wm_spi_eeprom_ready(struct wm_softc *sc)
5237 {
5238 	uint32_t val;
5239 	int usec;
5240 
5241 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5242 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5243 		wm_eeprom_recvbits(sc, &val, 8);
5244 		if ((val & SPI_SR_RDY) == 0)
5245 			break;
5246 	}
5247 	if (usec >= SPI_MAX_RETRIES) {
5248 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5249 		return 1;
5250 	}
5251 	return 0;
5252 }
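
/*
 * Note: the loop above reissues RDSR and rechecks the busy bit with a
 * 5us delay per try, giving up after roughly SPI_MAX_RETRIES
 * microseconds of accumulated delay; the bit-bang transfers themselves
 * add to the real elapsed time.
 */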
5253 
5254 /*
5255  * wm_read_eeprom_spi:
5256  *
5257  *	Read a word from the EEPROM using the SPI protocol.
5258  */
5259 static int
5260 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5261 {
5262 	uint32_t reg, val;
5263 	int i;
5264 	uint8_t opc;
5265 
5266 	/* Clear SK and CS. */
5267 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5268 	CSR_WRITE(sc, WMREG_EECD, reg);
5269 	delay(2);
5270 
5271 	if (wm_spi_eeprom_ready(sc))
5272 		return 1;
5273 
5274 	/* Toggle CS to flush commands. */
5275 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5276 	delay(2);
5277 	CSR_WRITE(sc, WMREG_EECD, reg);
5278 	delay(2);
5279 
5280 	opc = SPI_OPC_READ;
5281 	if (sc->sc_ee_addrbits == 8 && word >= 128)
5282 		opc |= SPI_OPC_A8;
5283 
5284 	wm_eeprom_sendbits(sc, opc, 8);
5285 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5286 
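	/*
	 * SPI parts stream bytes in address order, and the byte at the
	 * lower address is the low half of each 16-bit word, but
	 * wm_eeprom_recvbits() packs bits MSB first, so the two bytes
	 * of every received word are swapped below.
	 */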
5287 	for (i = 0; i < wordcnt; i++) {
5288 		wm_eeprom_recvbits(sc, &val, 16);
5289 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5290 	}
5291 
5292 	/* Raise CS and clear SK. */
5293 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5294 	CSR_WRITE(sc, WMREG_EECD, reg);
5295 	delay(2);
5296 
5297 	return 0;
5298 }
5299 
5300 #define EEPROM_CHECKSUM		0xBABA
5301 #define EEPROM_SIZE		0x0040
5302 
5303 /*
5304  * wm_validate_eeprom_checksum
5305  *
5306  * The checksum is defined as the sum of the first 64 (16 bit) words.
5307  */
5308 static int
5309 wm_validate_eeprom_checksum(struct wm_softc *sc)
5310 {
5311 	uint16_t checksum;
5312 	uint16_t eeprom_data;
5313 	int i;
5314 
5315 	checksum = 0;
5316 
5317 	/* Don't check for I211 */
5318 	if (sc->sc_type == WM_T_I211)
5319 		return 0;
5320 
5321 #ifdef WM_DEBUG
5322 	/* Dump EEPROM image for debug */
5323 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5324 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5325 	    || (sc->sc_type == WM_T_PCH2)) {
5326 		wm_read_eeprom(sc, 0x19, 1, &eeprom_data);
5327 		if ((eeprom_data & 0x40) == 0) {
5328 			DPRINTF(WM_DEBUG_NVM, ("%s: NVM needs to be updated\n",
5329 				device_xname(sc->sc_dev)));
5330 		}
5331 	}
5332 
5333 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
5334 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5335 		for (i = 0; i < EEPROM_SIZE; i++) {
5336 			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5337 				printf("XX ");
5338 			else
5339 				printf("%04x ", eeprom_data);
5340 			if (i % 8 == 7)
5341 				printf("\n");
5342 		}
5343 	}
5344 
5345 #endif /* WM_DEBUG */
5346 
5347 	for (i = 0; i < EEPROM_SIZE; i++) {
5348 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5349 			return 1;
5350 		checksum += eeprom_data;
5351 	}
5352 
5353 	if (checksum != (uint16_t) EEPROM_CHECKSUM)
5354 		return 1;
5355 
5356 	return 0;
5357 }
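
/*
 * Worked example: if words 0x00 through 0x3e sum to 0x1234, the
 * checksum word at offset 0x3f must contain
 *
 *	0xbaba - 0x1234 = 0xa886
 *
 * so that the 16-bit sum of all EEPROM_SIZE words equals
 * EEPROM_CHECKSUM (0xbaba); the sum wraps modulo 2^16.
 */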
5358 
5359 /*
5360  * wm_read_eeprom:
5361  *
5362  *	Read data from the serial EEPROM.
5363  */
5364 static int
5365 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5366 {
5367 	int rv;
5368 
5369 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
5370 		return 1;
5371 
5372 	if (wm_acquire_eeprom(sc))
5373 		return 1;
5374 
5375 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5376 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5377 	    || (sc->sc_type == WM_T_PCH2))
5378 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5379 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5380 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5381 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
5382 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5383 	else
5384 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5385 
5386 	wm_release_eeprom(sc);
5387 	return rv;
5388 }
5389 
5390 static int
5391 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5392     uint16_t *data)
5393 {
5394 	int i, eerd = 0;
5395 	int error = 0;
5396 
5397 	for (i = 0; i < wordcnt; i++) {
5398 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5399 
5400 		CSR_WRITE(sc, WMREG_EERD, eerd);
5401 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5402 		if (error != 0)
5403 			break;
5404 
5405 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5406 	}
5407 
5408 	return error;
5409 }
5410 
5411 static int
5412 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5413 {
5414 	uint32_t attempts = 100000;
5415 	uint32_t i, reg = 0;
5416 	int32_t done = -1;
5417 
5418 	for (i = 0; i < attempts; i++) {
5419 		reg = CSR_READ(sc, rw);
5420 
5421 		if (reg & EERD_DONE) {
5422 			done = 0;
5423 			break;
5424 		}
5425 		delay(5);
5426 	}
5427 
5428 	return done;
5429 }
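
/*
 * Note: the poll above retries up to 100000 times with a 5us delay,
 * i.e. roughly half a second worst case, before giving up with -1.
 */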
5430 
5431 static int
5432 wm_check_alt_mac_addr(struct wm_softc *sc)
5433 {
5434 	uint16_t myea[ETHER_ADDR_LEN / 2];
5435 	uint16_t offset = EEPROM_OFF_MACADDR;
5436 
5437 	/* Try to read alternative MAC address pointer */
5438 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5439 		return -1;
5440 
5441 	/* Check pointer */
5442 	if (offset == 0xffff)
5443 		return -1;
5444 
5445 	/*
5446 	 * Check whether the alternative MAC address is valid.  Some
5447 	 * cards have a non-0xffff pointer but don't actually use an
5448 	 * alternative MAC address.
5449 	 *
5450 	 * A valid unicast address has the multicast (group) bit clear.
5451 	 */
5452 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5453 		if (((myea[0] & 0xff) & 0x01) == 0)
5454 			return 0; /* found! */
5455 
5456 	/* not found */
5457 	return -1;
5458 }
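
/*
 * Example of the group-bit test above: the EEPROM stores words
 * little-endian, so (myea[0] & 0xff) is the first octet of the MAC.
 * For 00:1b:21:xx:xx:xx bit 0 of 0x00 is clear, so the entry is a
 * usable unicast address; for 01:00:5e:xx:xx:xx bit 0 of 0x01 is set
 * (a multicast/group address), so the entry is rejected.
 */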
5459 
5460 static int
5461 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5462 {
5463 	uint16_t myea[ETHER_ADDR_LEN / 2];
5464 	uint16_t offset = EEPROM_OFF_MACADDR;
5465 	int do_invert = 0;
5466 
5467 	switch (sc->sc_type) {
5468 	case WM_T_82580:
5469 	case WM_T_82580ER:
5470 	case WM_T_I350:
5471 		switch (sc->sc_funcid) {
5472 		case 0:
5473 			/* default value (== EEPROM_OFF_MACADDR) */
5474 			break;
5475 		case 1:
5476 			offset = EEPROM_OFF_LAN1;
5477 			break;
5478 		case 2:
5479 			offset = EEPROM_OFF_LAN2;
5480 			break;
5481 		case 3:
5482 			offset = EEPROM_OFF_LAN3;
5483 			break;
5484 		default:
5485 			goto bad;
5486 			/* NOTREACHED */
5487 			break;
5488 		}
5489 		break;
5490 	case WM_T_82571:
5491 	case WM_T_82575:
5492 	case WM_T_82576:
5493 	case WM_T_80003:
5494 	case WM_T_I210:
5495 	case WM_T_I211:
5496 		if (wm_check_alt_mac_addr(sc) != 0) {
5497 			/* reset the offset to LAN0 */
5498 			offset = EEPROM_OFF_MACADDR;
5499 			if ((sc->sc_funcid & 0x01) == 1)
5500 				do_invert = 1;
5501 			goto do_read;
5502 		}
5503 		switch (sc->sc_funcid) {
5504 		case 0:
5505 			/*
5506 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5507 			 * itself.
5508 			 */
5509 			break;
5510 		case 1:
5511 			offset += EEPROM_OFF_MACADDR_LAN1;
5512 			break;
5513 		case 2:
5514 			offset += EEPROM_OFF_MACADDR_LAN2;
5515 			break;
5516 		case 3:
5517 			offset += EEPROM_OFF_MACADDR_LAN3;
5518 			break;
5519 		default:
5520 			goto bad;
5521 			/* NOTREACHED */
5522 			break;
5523 		}
5524 		break;
5525 	default:
5526 		if ((sc->sc_funcid & 0x01) == 1)
5527 			do_invert = 1;
5528 		break;
5529 	}
5530 
5531  do_read:
5532 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5533 		myea) != 0) {
5534 		goto bad;
5535 	}
5536 
5537 	enaddr[0] = myea[0] & 0xff;
5538 	enaddr[1] = myea[0] >> 8;
5539 	enaddr[2] = myea[1] & 0xff;
5540 	enaddr[3] = myea[1] >> 8;
5541 	enaddr[4] = myea[2] & 0xff;
5542 	enaddr[5] = myea[2] >> 8;
5543 
5544 	/*
5545 	 * Toggle the LSB of the MAC address on the second port
5546 	 * of some dual port cards.
5547 	 */
5548 	if (do_invert != 0)
5549 		enaddr[5] ^= 1;
5550 
5551 	return 0;
5552 
5553  bad:
5554 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5555 
5556 	return -1;
5557 }
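
/*
 * Unpack example: EEPROM words { 0x1100, 0x3322, 0x5544 } yield the
 * station address 00:11:22:33:44:55; each word holds two octets in
 * little-endian order, as unpacked above.
 */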
5558 
5559 /*
5560  * wm_add_rxbuf:
5561  *
5562  *	Add a receive buffer to the indicated descriptor.
5563  */
5564 static int
5565 wm_add_rxbuf(struct wm_softc *sc, int idx)
5566 {
5567 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5568 	struct mbuf *m;
5569 	int error;
5570 
5571 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5572 	if (m == NULL)
5573 		return ENOBUFS;
5574 
5575 	MCLGET(m, M_DONTWAIT);
5576 	if ((m->m_flags & M_EXT) == 0) {
5577 		m_freem(m);
5578 		return ENOBUFS;
5579 	}
5580 
5581 	if (rxs->rxs_mbuf != NULL)
5582 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5583 
5584 	rxs->rxs_mbuf = m;
5585 
5586 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5587 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5588 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
5589 	if (error) {
5590 		/* XXX XXX XXX */
5591 		aprint_error_dev(sc->sc_dev,
5592 		    "unable to load rx DMA map %d, error = %d\n",
5593 		    idx, error);
5594 		panic("wm_add_rxbuf");
5595 	}
5596 
5597 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5598 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5599 
5600 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5601 		if ((sc->sc_rctl & RCTL_EN) != 0)
5602 			WM_INIT_RXDESC(sc, idx);
5603 	} else
5604 		WM_INIT_RXDESC(sc, idx);
5605 
5606 	return 0;
5607 }
5608 
5609 /*
5610  * wm_set_ral:
5611  *
5612  *	Set an entry in the receive address list.
5613  */
5614 static void
5615 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5616 {
5617 	uint32_t ral_lo, ral_hi;
5618 
5619 	if (enaddr != NULL) {
5620 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5621 		    (enaddr[3] << 24);
5622 		ral_hi = enaddr[4] | (enaddr[5] << 8);
5623 		ral_hi |= RAL_AV;
5624 	} else {
5625 		ral_lo = 0;
5626 		ral_hi = 0;
5627 	}
5628 
5629 	if (sc->sc_type >= WM_T_82544) {
5630 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5631 		    ral_lo);
5632 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5633 		    ral_hi);
5634 	} else {
5635 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5636 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5637 	}
5638 }
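
/*
 * Register layout example: for 00:11:22:33:44:55 the code above
 * writes ral_lo = 0x33221100 and ral_hi = 0x00005544 | RAL_AV;
 * RAL_AV marks the entry valid so the hardware will match on it.
 */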
5639 
5640 /*
5641  * wm_mchash:
5642  *
5643  *	Compute the hash of the multicast address for the 4096-bit
5644  *	multicast filter.
5645  */
5646 static uint32_t
5647 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5648 {
5649 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5650 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5651 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5652 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5653 	uint32_t hash;
5654 
5655 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5656 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5657 	    || (sc->sc_type == WM_T_PCH2)) {
5658 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5659 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5660 		return (hash & 0x3ff);
5661 	}
5662 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5663 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5664 
5665 	return (hash & 0xfff);
5666 }
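
/*
 * Worked example (assuming sc_mchash_type == 0, i.e. shifts 4/4 on
 * non-ICH parts): for an address ending in ab:cd,
 *
 *	hash = (0xab >> 4) | (0xcd << 4) = 0x00a | 0xcd0 = 0xcda
 *
 * wm_set_filter() then sets bit (0xcda & 0x1f) = 26 in MTA word
 * (0xcda >> 5) = 0x66.
 */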
5667 
5668 /*
5669  * wm_set_filter:
5670  *
5671  *	Set up the receive filter.
5672  */
5673 static void
5674 wm_set_filter(struct wm_softc *sc)
5675 {
5676 	struct ethercom *ec = &sc->sc_ethercom;
5677 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5678 	struct ether_multi *enm;
5679 	struct ether_multistep step;
5680 	bus_addr_t mta_reg;
5681 	uint32_t hash, reg, bit;
5682 	int i, size;
5683 
5684 	if (sc->sc_type >= WM_T_82544)
5685 		mta_reg = WMREG_CORDOVA_MTA;
5686 	else
5687 		mta_reg = WMREG_MTA;
5688 
5689 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5690 
5691 	if (ifp->if_flags & IFF_BROADCAST)
5692 		sc->sc_rctl |= RCTL_BAM;
5693 	if (ifp->if_flags & IFF_PROMISC) {
5694 		sc->sc_rctl |= RCTL_UPE;
5695 		goto allmulti;
5696 	}
5697 
5698 	/*
5699 	 * Set the station address in the first RAL slot, and
5700 	 * clear the remaining slots.
5701 	 */
5702 	if (sc->sc_type == WM_T_ICH8)
5703 		size = WM_RAL_TABSIZE_ICH8 - 1;
5704 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5705 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2))
5706 		size = WM_RAL_TABSIZE_ICH8;
5707 	else if (sc->sc_type == WM_T_82575)
5708 		size = WM_RAL_TABSIZE_82575;
5709 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5710 		size = WM_RAL_TABSIZE_82576;
5711 	else if (sc->sc_type == WM_T_I350)
5712 		size = WM_RAL_TABSIZE_I350;
5713 	else
5714 		size = WM_RAL_TABSIZE;
5715 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5716 	for (i = 1; i < size; i++)
5717 		wm_set_ral(sc, NULL, i);
5718 
5719 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5720 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5721 	    || (sc->sc_type == WM_T_PCH2))
5722 		size = WM_ICH8_MC_TABSIZE;
5723 	else
5724 		size = WM_MC_TABSIZE;
5725 	/* Clear out the multicast table. */
5726 	for (i = 0; i < size; i++)
5727 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5728 
5729 	ETHER_FIRST_MULTI(step, ec, enm);
5730 	while (enm != NULL) {
5731 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5732 			/*
5733 			 * We must listen to a range of multicast addresses.
5734 			 * For now, just accept all multicasts, rather than
5735 			 * trying to set only those filter bits needed to match
5736 			 * the range.  (At this time, the only use of address
5737 			 * ranges is for IP multicast routing, for which the
5738 			 * range is big enough to require all bits set.)
5739 			 */
5740 			goto allmulti;
5741 		}
5742 
5743 		hash = wm_mchash(sc, enm->enm_addrlo);
5744 
5745 		reg = (hash >> 5);
5746 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5747 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5748 		    || (sc->sc_type == WM_T_PCH2))
5749 			reg &= 0x1f;
5750 		else
5751 			reg &= 0x7f;
5752 		bit = hash & 0x1f;
5753 
5754 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5755 		hash |= 1U << bit;
5756 
5757 		/* XXX Hardware bug?  Writing here may clobber reg - 1. */
5758 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5759 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5760 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5761 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5762 		} else
5763 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5764 
5765 		ETHER_NEXT_MULTI(step, enm);
5766 	}
5767 
5768 	ifp->if_flags &= ~IFF_ALLMULTI;
5769 	goto setit;
5770 
5771  allmulti:
5772 	ifp->if_flags |= IFF_ALLMULTI;
5773 	sc->sc_rctl |= RCTL_MPE;
5774 
5775  setit:
5776 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5777 }
5778 
5779 /*
5780  * wm_tbi_mediainit:
5781  *
5782  *	Initialize media for use on 1000BASE-X devices.
5783  */
5784 static void
5785 wm_tbi_mediainit(struct wm_softc *sc)
5786 {
5787 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5788 	const char *sep = "";
5789 
5790 	if (sc->sc_type < WM_T_82543)
5791 		sc->sc_tipg = TIPG_WM_DFLT;
5792 	else
5793 		sc->sc_tipg = TIPG_LG_DFLT;
5794 
5795 	sc->sc_tbi_anegticks = 5;
5796 
5797 	/* Initialize our media structures */
5798 	sc->sc_mii.mii_ifp = ifp;
5799 
5800 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5801 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5802 	    wm_tbi_mediastatus);
5803 
5804 	/*
5805 	 * SWD Pins:
5806 	 *
5807 	 *	0 = Link LED (output)
5808 	 *	1 = Loss Of Signal (input)
5809 	 */
5810 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5811 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5812 
5813 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5814 
5815 #define	ADD(ss, mm, dd)							\
5816 do {									\
5817 	aprint_normal("%s%s", sep, ss);					\
5818 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5819 	sep = ", ";							\
5820 } while (/*CONSTCOND*/0)
5821 
5822 	aprint_normal_dev(sc->sc_dev, "");
5823 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5824 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5825 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5826 	aprint_normal("\n");
5827 
5828 #undef ADD
5829 
5830 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5831 }
5832 
5833 /*
5834  * wm_tbi_mediastatus:	[ifmedia interface function]
5835  *
5836  *	Get the current interface media status on a 1000BASE-X device.
5837  */
5838 static void
5839 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5840 {
5841 	struct wm_softc *sc = ifp->if_softc;
5842 	uint32_t ctrl, status;
5843 
5844 	ifmr->ifm_status = IFM_AVALID;
5845 	ifmr->ifm_active = IFM_ETHER;
5846 
5847 	status = CSR_READ(sc, WMREG_STATUS);
5848 	if ((status & STATUS_LU) == 0) {
5849 		ifmr->ifm_active |= IFM_NONE;
5850 		return;
5851 	}
5852 
5853 	ifmr->ifm_status |= IFM_ACTIVE;
5854 	ifmr->ifm_active |= IFM_1000_SX;
5855 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5856 		ifmr->ifm_active |= IFM_FDX;
5857 	ctrl = CSR_READ(sc, WMREG_CTRL);
5858 	if (ctrl & CTRL_RFCE)
5859 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5860 	if (ctrl & CTRL_TFCE)
5861 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5862 }
5863 
5864 /*
5865  * wm_tbi_mediachange:	[ifmedia interface function]
5866  *
5867  *	Set hardware to newly-selected media on a 1000BASE-X device.
5868  */
5869 static int
5870 wm_tbi_mediachange(struct ifnet *ifp)
5871 {
5872 	struct wm_softc *sc = ifp->if_softc;
5873 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5874 	uint32_t status;
5875 	int i;
5876 
5877 	sc->sc_txcw = 0;
5878 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5879 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5880 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5881 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5882 		sc->sc_txcw |= TXCW_ANE;
5883 	} else {
5884 		/*
5885 		 * If autonegotiation is turned off, force link up and turn on
5886 		 * full duplex
5887 		 */
5888 		sc->sc_txcw &= ~TXCW_ANE;
5889 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5890 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5891 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5892 		delay(1000);
5893 	}
5894 
5895 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5896 		    device_xname(sc->sc_dev),sc->sc_txcw));
5897 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5898 	delay(10000);
5899 
5900 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5901 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5902 
5903 	/*
5904 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit is set
5905 	 * when the optics detect a signal, and 0 when they don't.
5906 	 */
5907 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
5908 		/* Have signal; wait for the link to come up. */
5909 
5910 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5911 			/*
5912 			 * Reset the link, and let autonegotiation do its thing
5913 			 */
5914 			sc->sc_ctrl |= CTRL_LRST;
5915 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5916 			delay(1000);
5917 			sc->sc_ctrl &= ~CTRL_LRST;
5918 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5919 			delay(1000);
5920 		}
5921 
5922 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
5923 			delay(10000);
5924 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
5925 				break;
5926 		}
5927 
5928 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
5929 			    device_xname(sc->sc_dev),i));
5930 
5931 		status = CSR_READ(sc, WMREG_STATUS);
5932 		DPRINTF(WM_DEBUG_LINK,
5933 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
5934 			device_xname(sc->sc_dev),status, STATUS_LU));
5935 		if (status & STATUS_LU) {
5936 			/* Link is up. */
5937 			DPRINTF(WM_DEBUG_LINK,
5938 			    ("%s: LINK: set media -> link up %s\n",
5939 			    device_xname(sc->sc_dev),
5940 			    (status & STATUS_FD) ? "FDX" : "HDX"));
5941 
5942 			/*
5943 			 * NOTE: CTRL will update TFCE and RFCE automatically,
5944 			 * so we should update sc->sc_ctrl
5945 			 */
5946 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
5947 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
5948 			sc->sc_fcrtl &= ~FCRTL_XONE;
5949 			if (status & STATUS_FD)
5950 				sc->sc_tctl |=
5951 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5952 			else
5953 				sc->sc_tctl |=
5954 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
5955 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
5956 				sc->sc_fcrtl |= FCRTL_XONE;
5957 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5958 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
5959 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
5960 				      sc->sc_fcrtl);
5961 			sc->sc_tbi_linkup = 1;
5962 		} else {
5963 			if (i == WM_LINKUP_TIMEOUT)
5964 				wm_check_for_link(sc);
5965 			/* Link is down. */
5966 			DPRINTF(WM_DEBUG_LINK,
5967 			    ("%s: LINK: set media -> link down\n",
5968 			    device_xname(sc->sc_dev)));
5969 			sc->sc_tbi_linkup = 0;
5970 		}
5971 	} else {
5972 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
5973 		    device_xname(sc->sc_dev)));
5974 		sc->sc_tbi_linkup = 0;
5975 	}
5976 
5977 	wm_tbi_set_linkled(sc);
5978 
5979 	return 0;
5980 }
5981 
5982 /*
5983  * wm_tbi_set_linkled:
5984  *
5985  *	Update the link LED on 1000BASE-X devices.
5986  */
5987 static void
5988 wm_tbi_set_linkled(struct wm_softc *sc)
5989 {
5990 
5991 	if (sc->sc_tbi_linkup)
5992 		sc->sc_ctrl |= CTRL_SWDPIN(0);
5993 	else
5994 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
5995 
5996 	/* 82540 or newer devices are active low */
5997 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
5998 
5999 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6000 }
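
/*
 * Example of the active-low handling above: with link up, SWDPIN(0)
 * is first set, then the XOR clears it again on 82540 and newer, so
 * those parts drive the pin low to light the LED.
 */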
6001 
6002 /*
6003  * wm_tbi_check_link:
6004  *
6005  *	Check the link on 1000BASE-X devices.
6006  */
6007 static void
6008 wm_tbi_check_link(struct wm_softc *sc)
6009 {
6010 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6011 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6012 	uint32_t rxcw, ctrl, status;
6013 
6014 	status = CSR_READ(sc, WMREG_STATUS);
6015 
6016 	rxcw = CSR_READ(sc, WMREG_RXCW);
6017 	ctrl = CSR_READ(sc, WMREG_CTRL);
6018 
6019 	/* set link status */
6020 	if ((status & STATUS_LU) == 0) {
6021 		DPRINTF(WM_DEBUG_LINK,
6022 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6023 		sc->sc_tbi_linkup = 0;
6024 	} else if (sc->sc_tbi_linkup == 0) {
6025 		DPRINTF(WM_DEBUG_LINK,
6026 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6027 		    (status & STATUS_FD) ? "FDX" : "HDX"));
6028 		sc->sc_tbi_linkup = 1;
6029 	}
6030 
6031 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6032 	    && ((status & STATUS_LU) == 0)) {
6033 		sc->sc_tbi_linkup = 0;
6034 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6035 			/* RXCFG storm! */
6036 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6037 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6038 			wm_init(ifp);
6039 			ifp->if_start(ifp);
6040 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6041 			/* If the timer expired, retry autonegotiation */
6042 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6043 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6044 				sc->sc_tbi_ticks = 0;
6045 				/*
6046 				 * Reset the link, and let autonegotiation do
6047 				 * its thing
6048 				 */
6049 				sc->sc_ctrl |= CTRL_LRST;
6050 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6051 				delay(1000);
6052 				sc->sc_ctrl &= ~CTRL_LRST;
6053 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6054 				delay(1000);
6055 				CSR_WRITE(sc, WMREG_TXCW,
6056 				    sc->sc_txcw & ~TXCW_ANE);
6057 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6058 			}
6059 		}
6060 	}
6061 
6062 	wm_tbi_set_linkled(sc);
6063 }
6064 
6065 /*
6066  * wm_gmii_reset:
6067  *
6068  *	Reset the PHY.
6069  */
6070 static void
6071 wm_gmii_reset(struct wm_softc *sc)
6072 {
6073 	uint32_t reg;
6074 	int rv;
6075 
6076 	/* get phy semaphore */
6077 	switch (sc->sc_type) {
6078 	case WM_T_82571:
6079 	case WM_T_82572:
6080 	case WM_T_82573:
6081 	case WM_T_82574:
6082 	case WM_T_82583:
6083 		 /* XXX should get sw semaphore, too */
6084 		rv = wm_get_swsm_semaphore(sc);
6085 		break;
6086 	case WM_T_82575:
6087 	case WM_T_82576:
6088 	case WM_T_82580:
6089 	case WM_T_82580ER:
6090 	case WM_T_I350:
6091 	case WM_T_I210:
6092 	case WM_T_I211:
6093 	case WM_T_80003:
6094 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6095 		break;
6096 	case WM_T_ICH8:
6097 	case WM_T_ICH9:
6098 	case WM_T_ICH10:
6099 	case WM_T_PCH:
6100 	case WM_T_PCH2:
6101 		rv = wm_get_swfwhw_semaphore(sc);
6102 		break;
6103 	default:
6104 		/* Nothing to do */
6105 		rv = 0;
6106 		break;
6107 	}
6108 	if (rv != 0) {
6109 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6110 		    __func__);
6111 		return;
6112 	}
6113 
6114 	switch (sc->sc_type) {
6115 	case WM_T_82542_2_0:
6116 	case WM_T_82542_2_1:
6117 		/* null */
6118 		break;
6119 	case WM_T_82543:
6120 		/*
6121 		 * With 82543, we need to force speed and duplex on the MAC
6122 		 * equal to what the PHY speed and duplex configuration is.
6123 		 * In addition, we need to perform a hardware reset on the PHY
6124 		 * to take it out of reset.
6125 		 */
6126 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6127 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6128 
6129 		/* The PHY reset pin is active-low. */
6130 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6131 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6132 		    CTRL_EXT_SWDPIN(4));
6133 		reg |= CTRL_EXT_SWDPIO(4);
6134 
6135 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6136 		delay(10*1000);
6137 
6138 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6139 		delay(150);
6140 #if 0
6141 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6142 #endif
6143 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6144 		break;
6145 	case WM_T_82544:	/* reset 10000us */
6146 	case WM_T_82540:
6147 	case WM_T_82545:
6148 	case WM_T_82545_3:
6149 	case WM_T_82546:
6150 	case WM_T_82546_3:
6151 	case WM_T_82541:
6152 	case WM_T_82541_2:
6153 	case WM_T_82547:
6154 	case WM_T_82547_2:
6155 	case WM_T_82571:	/* reset 100us */
6156 	case WM_T_82572:
6157 	case WM_T_82573:
6158 	case WM_T_82574:
6159 	case WM_T_82575:
6160 	case WM_T_82576:
6161 	case WM_T_82580:
6162 	case WM_T_82580ER:
6163 	case WM_T_I350:
6164 	case WM_T_I210:
6165 	case WM_T_I211:
6166 	case WM_T_82583:
6167 	case WM_T_80003:
6168 		/* generic reset */
6169 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6170 		delay(20000);
6171 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6172 		delay(20000);
6173 
6174 		if ((sc->sc_type == WM_T_82541)
6175 		    || (sc->sc_type == WM_T_82541_2)
6176 		    || (sc->sc_type == WM_T_82547)
6177 		    || (sc->sc_type == WM_T_82547_2)) {
6178 			/* Workarounds for IGP are done in igp_reset() */
6179 			/* XXX add code to set LED after phy reset */
6180 		}
6181 		break;
6182 	case WM_T_ICH8:
6183 	case WM_T_ICH9:
6184 	case WM_T_ICH10:
6185 	case WM_T_PCH:
6186 	case WM_T_PCH2:
6187 		/* generic reset */
6188 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6189 		delay(100);
6190 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6191 		delay(150);
6192 		break;
6193 	default:
6194 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6195 		    __func__);
6196 		break;
6197 	}
6198 
6199 	/* release PHY semaphore */
6200 	switch (sc->sc_type) {
6201 	case WM_T_82571:
6202 	case WM_T_82572:
6203 	case WM_T_82573:
6204 	case WM_T_82574:
6205 	case WM_T_82583:
6206 		 /* XXX should put sw semaphore, too */
6207 		wm_put_swsm_semaphore(sc);
6208 		break;
6209 	case WM_T_82575:
6210 	case WM_T_82576:
6211 	case WM_T_82580:
6212 	case WM_T_82580ER:
6213 	case WM_T_I350:
6214 	case WM_T_I210:
6215 	case WM_T_I211:
6216 	case WM_T_80003:
6217 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6218 		break;
6219 	case WM_T_ICH8:
6220 	case WM_T_ICH9:
6221 	case WM_T_ICH10:
6222 	case WM_T_PCH:
6223 	case WM_T_PCH2:
6224 		wm_put_swfwhw_semaphore(sc);
6225 		break;
6226 	default:
6227 		/* Nothing to do */
6228 		rv = 0;
6229 		break;
6230 	}
6231 
6232 	/* get_cfg_done */
6233 	wm_get_cfg_done(sc);
6234 
6235 	/* extra setup */
6236 	switch (sc->sc_type) {
6237 	case WM_T_82542_2_0:
6238 	case WM_T_82542_2_1:
6239 	case WM_T_82543:
6240 	case WM_T_82544:
6241 	case WM_T_82540:
6242 	case WM_T_82545:
6243 	case WM_T_82545_3:
6244 	case WM_T_82546:
6245 	case WM_T_82546_3:
6246 	case WM_T_82541_2:
6247 	case WM_T_82547_2:
6248 	case WM_T_82571:
6249 	case WM_T_82572:
6250 	case WM_T_82573:
6251 	case WM_T_82574:
6252 	case WM_T_82575:
6253 	case WM_T_82576:
6254 	case WM_T_82580:
6255 	case WM_T_82580ER:
6256 	case WM_T_I350:
6257 	case WM_T_I210:
6258 	case WM_T_I211:
6259 	case WM_T_82583:
6260 	case WM_T_80003:
6261 		/* null */
6262 		break;
6263 	case WM_T_82541:
6264 	case WM_T_82547:
6265 		/* XXX Configure the activity LED after PHY reset */
6266 		break;
6267 	case WM_T_ICH8:
6268 	case WM_T_ICH9:
6269 	case WM_T_ICH10:
6270 	case WM_T_PCH:
6271 	case WM_T_PCH2:
6272 		/* Allow time for h/w to get to a quiescent state after reset */
6273 		delay(10*1000);
6274 
6275 		if (sc->sc_type == WM_T_PCH)
6276 			wm_hv_phy_workaround_ich8lan(sc);
6277 
6278 		if (sc->sc_type == WM_T_PCH2)
6279 			wm_lv_phy_workaround_ich8lan(sc);
6280 
6281 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6282 			/*
6283 			 * dummy read to clear the phy wakeup bit after lcd
6284 			 * reset
6285 			 */
6286 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6287 		}
6288 
6289 		/*
6290 		 * XXX Configure the LCD with the extended configuration
6291 		 * region in NVM.
6292 		 */
6293 
6294 		/* Configure the LCD with the OEM bits in NVM */
6295 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6296 			/*
6297 			 * Disable LPLU.
6298 			 * XXX It seems that 82567 has LPLU, too.
6299 			 */
6300 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6301 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6302 			reg |= HV_OEM_BITS_ANEGNOW;
6303 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6304 		}
6305 		break;
6306 	default:
6307 		panic("%s: unknown type\n", __func__);
6308 		break;
6309 	}
6310 }
6311 
6312 /*
6313  * wm_gmii_mediainit:
6314  *
6315  *	Initialize media for use on 1000BASE-T devices.
6316  */
6317 static void
6318 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6319 {
6320 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6321 	struct mii_data *mii = &sc->sc_mii;
6322 
6323 	/* We have MII. */
6324 	sc->sc_flags |= WM_F_HAS_MII;
6325 
6326 	if (sc->sc_type == WM_T_80003)
6327 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
6328 	else
6329 		sc->sc_tipg = TIPG_1000T_DFLT;
6330 
6331 	/*
6332 	 * Let the chip set speed/duplex on its own based on
6333 	 * signals from the PHY.
6334 	 * XXXbouyer - I'm not sure this is right for the 80003,
6335 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6336 	 */
6337 	sc->sc_ctrl |= CTRL_SLU;
6338 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6339 
6340 	/* Initialize our media structures and probe the GMII. */
6341 	mii->mii_ifp = ifp;
6342 
6343 	/*
6344 	 * Determine the PHY access method.
6345 	 *
6346 	 *  For SGMII, use SGMII specific method.
6347 	 *
6348 	 *  For some devices, we can determine the PHY access method
6349 	 * from sc_type.
6350 	 *
6351 	 *  For ICH8 variants, it's difficult to determine the PHY access
6352 	 * method by sc_type, so use the PCI product ID for some devices.
6353 	 * For other ICH8 variants, try igp's method first; if no PHY is
6354 	 * detected that way, fall back to bm's method.
6355 	 */
6356 	switch (prodid) {
6357 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6358 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6359 		/* 82577 */
6360 		sc->sc_phytype = WMPHY_82577;
6361 		mii->mii_readreg = wm_gmii_hv_readreg;
6362 		mii->mii_writereg = wm_gmii_hv_writereg;
6363 		break;
6364 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6365 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6366 		/* 82578 */
6367 		sc->sc_phytype = WMPHY_82578;
6368 		mii->mii_readreg = wm_gmii_hv_readreg;
6369 		mii->mii_writereg = wm_gmii_hv_writereg;
6370 		break;
6371 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6372 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6373 		/* 82579 */
6374 		sc->sc_phytype = WMPHY_82579;
6375 		mii->mii_readreg = wm_gmii_hv_readreg;
6376 		mii->mii_writereg = wm_gmii_hv_writereg;
6377 		break;
6378 	case PCI_PRODUCT_INTEL_82801I_BM:
6379 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6380 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6381 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6382 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6383 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6384 		/* 82567 */
6385 		sc->sc_phytype = WMPHY_BM;
6386 		mii->mii_readreg = wm_gmii_bm_readreg;
6387 		mii->mii_writereg = wm_gmii_bm_writereg;
6388 		break;
6389 	default:
6390 		if ((sc->sc_flags & WM_F_SGMII) != 0) {
6391 			mii->mii_readreg = wm_sgmii_readreg;
6392 			mii->mii_writereg = wm_sgmii_writereg;
6393 		} else if (sc->sc_type >= WM_T_80003) {
6394 			mii->mii_readreg = wm_gmii_i80003_readreg;
6395 			mii->mii_writereg = wm_gmii_i80003_writereg;
6396 		} else if (sc->sc_type >= WM_T_I210) {
6397 			mii->mii_readreg = wm_gmii_i82544_readreg;
6398 			mii->mii_writereg = wm_gmii_i82544_writereg;
6399 		} else if (sc->sc_type >= WM_T_82580) {
6400 			sc->sc_phytype = WMPHY_82580;
6401 			mii->mii_readreg = wm_gmii_82580_readreg;
6402 			mii->mii_writereg = wm_gmii_82580_writereg;
6403 		} else if (sc->sc_type >= WM_T_82544) {
6404 			mii->mii_readreg = wm_gmii_i82544_readreg;
6405 			mii->mii_writereg = wm_gmii_i82544_writereg;
6406 		} else {
6407 			mii->mii_readreg = wm_gmii_i82543_readreg;
6408 			mii->mii_writereg = wm_gmii_i82543_writereg;
6409 		}
6410 		break;
6411 	}
6412 	mii->mii_statchg = wm_gmii_statchg;
6413 
6414 	wm_gmii_reset(sc);
6415 
6416 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6417 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6418 	    wm_gmii_mediastatus);
6419 
6420 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6421 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6422 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
6423 	    || (sc->sc_type == WM_T_I211)) {
6424 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6425 			/* Attach only one port */
6426 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6427 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6428 		} else {
6429 			int i;
6430 			uint32_t ctrl_ext;
6431 
6432 			/* Power on the SGMII PHY if it is disabled */
6433 			ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6434 			CSR_WRITE(sc, WMREG_CTRL_EXT,
6435 			    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6436 			CSR_WRITE_FLUSH(sc);
6437 			delay(300*1000); /* XXX too long */
6438 
6439 			/* PHY addresses 1 through 7 */
6440 			for (i = 1; i < 8; i++)
6441 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6442 				    i, MII_OFFSET_ANY, MIIF_DOPAUSE);
6443 
6444 			/* Restore the previous SFP cage power state */
6445 			CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6446 		}
6447 	} else {
6448 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6449 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6450 	}
6451 
6452 	/*
6453 	 * If the MAC is PCH2 and failed to detect MII PHY, call
6454 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6455 	 */
6456 	if ((sc->sc_type == WM_T_PCH2) &&
6457 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6458 		wm_set_mdio_slow_mode_hv(sc);
6459 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6460 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6461 	}
6462 
6463 	/*
6464 	 * (For ICH8 variants)
6465 	 * If PHY detection failed, use BM's r/w function and retry.
6466 	 */
6467 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6468 		/* if failed, retry with *_bm_* */
6469 		mii->mii_readreg = wm_gmii_bm_readreg;
6470 		mii->mii_writereg = wm_gmii_bm_writereg;
6471 
6472 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6473 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6474 	}
6475 
6476 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6477 		/* No PHY was found */
6478 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6479 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6480 		sc->sc_phytype = WMPHY_NONE;
6481 	} else {
6482 		/*
6483 		 * PHY Found!
6484 		 * Check PHY type.
6485 		 */
6486 		uint32_t model;
6487 		struct mii_softc *child;
6488 
6489 		child = LIST_FIRST(&mii->mii_phys);
6490 		if (device_is_a(child->mii_dev, "igphy")) {
6491 			struct igphy_softc *isc = (struct igphy_softc *)child;
6492 
6493 			model = isc->sc_mii.mii_mpd_model;
6494 			if (model == MII_MODEL_yyINTEL_I82566)
6495 				sc->sc_phytype = WMPHY_IGP_3;
6496 		}
6497 
6498 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6499 	}
6500 }
6501 
6502 /*
6503  * wm_gmii_mediastatus:	[ifmedia interface function]
6504  *
6505  *	Get the current interface media status on a 1000BASE-T device.
6506  */
6507 static void
6508 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6509 {
6510 	struct wm_softc *sc = ifp->if_softc;
6511 
6512 	ether_mediastatus(ifp, ifmr);
6513 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6514 	    | sc->sc_flowflags;
6515 }
6516 
6517 /*
6518  * wm_gmii_mediachange:	[ifmedia interface function]
6519  *
6520  *	Set hardware to newly-selected media on a 1000BASE-T device.
6521  */
6522 static int
6523 wm_gmii_mediachange(struct ifnet *ifp)
6524 {
6525 	struct wm_softc *sc = ifp->if_softc;
6526 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6527 	int rc;
6528 
6529 	if ((ifp->if_flags & IFF_UP) == 0)
6530 		return 0;
6531 
6532 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6533 	sc->sc_ctrl |= CTRL_SLU;
6534 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6535 	    || (sc->sc_type > WM_T_82543)) {
6536 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6537 	} else {
6538 		sc->sc_ctrl &= ~CTRL_ASDE;
6539 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6540 		if (ife->ifm_media & IFM_FDX)
6541 			sc->sc_ctrl |= CTRL_FD;
6542 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6543 		case IFM_10_T:
6544 			sc->sc_ctrl |= CTRL_SPEED_10;
6545 			break;
6546 		case IFM_100_TX:
6547 			sc->sc_ctrl |= CTRL_SPEED_100;
6548 			break;
6549 		case IFM_1000_T:
6550 			sc->sc_ctrl |= CTRL_SPEED_1000;
6551 			break;
6552 		default:
6553 			panic("wm_gmii_mediachange: bad media 0x%x",
6554 			    ife->ifm_media);
6555 		}
6556 	}
6557 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6558 	if (sc->sc_type <= WM_T_82543)
6559 		wm_gmii_reset(sc);
6560 
6561 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6562 		return 0;
6563 	return rc;
6564 }
6565 
6566 #define	MDI_IO		CTRL_SWDPIN(2)
6567 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6568 #define	MDI_CLK		CTRL_SWDPIN(3)
6569 
6570 static void
6571 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6572 {
6573 	uint32_t i, v;
6574 
6575 	v = CSR_READ(sc, WMREG_CTRL);
6576 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6577 	v |= MDI_DIR | CTRL_SWDPIO(3);
6578 
6579 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6580 		if (data & i)
6581 			v |= MDI_IO;
6582 		else
6583 			v &= ~MDI_IO;
6584 		CSR_WRITE(sc, WMREG_CTRL, v);
6585 		delay(10);
6586 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6587 		delay(10);
6588 		CSR_WRITE(sc, WMREG_CTRL, v);
6589 		delay(10);
6590 	}
6591 }
6592 
6593 static uint32_t
6594 i82543_mii_recvbits(struct wm_softc *sc)
6595 {
6596 	uint32_t v, i, data = 0;
6597 
6598 	v = CSR_READ(sc, WMREG_CTRL);
6599 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6600 	v |= CTRL_SWDPIO(3);
6601 
6602 	CSR_WRITE(sc, WMREG_CTRL, v);
6603 	delay(10);
6604 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6605 	delay(10);
6606 	CSR_WRITE(sc, WMREG_CTRL, v);
6607 	delay(10);
6608 
6609 	for (i = 0; i < 16; i++) {
6610 		data <<= 1;
6611 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6612 		delay(10);
6613 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6614 			data |= 1;
6615 		CSR_WRITE(sc, WMREG_CTRL, v);
6616 		delay(10);
6617 	}
6618 
6619 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6620 	delay(10);
6621 	CSR_WRITE(sc, WMREG_CTRL, v);
6622 	delay(10);
6623 
6624 	return data;
6625 }
6626 
6627 #undef MDI_IO
6628 #undef MDI_DIR
6629 #undef MDI_CLK
6630 
6631 /*
6632  * wm_gmii_i82543_readreg:	[mii interface function]
6633  *
6634  *	Read a PHY register on the GMII (i82543 version).
6635  */
6636 static int
6637 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6638 {
6639 	struct wm_softc *sc = device_private(self);
6640 	int rv;
6641 
6642 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6643 	i82543_mii_sendbits(sc, reg | (phy << 5) |
6644 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6645 	rv = i82543_mii_recvbits(sc) & 0xffff;
6646 
6647 	DPRINTF(WM_DEBUG_GMII,
6648 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6649 	    device_xname(sc->sc_dev), phy, reg, rv));
6650 
6651 	return rv;
6652 }
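
/*
 * Frame layout note: the 32 one-bits above form the MDIO preamble; the
 * 14-bit header sent next packs, from MSB to LSB,
 *
 *	<start:2> <opcode:2> <phy:5> <reg:5>
 *
 * and i82543_mii_recvbits() then clocks the turnaround plus 16 data
 * bits back in.
 */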
6653 
6654 /*
6655  * wm_gmii_i82543_writereg:	[mii interface function]
6656  *
6657  *	Write a PHY register on the GMII (i82543 version).
6658  */
6659 static void
6660 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6661 {
6662 	struct wm_softc *sc = device_private(self);
6663 
6664 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6665 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6666 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6667 	    (MII_COMMAND_START << 30), 32);
6668 }
6669 
6670 /*
6671  * wm_gmii_i82544_readreg:	[mii interface function]
6672  *
6673  *	Read a PHY register on the GMII.
6674  */
6675 static int
6676 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6677 {
6678 	struct wm_softc *sc = device_private(self);
6679 	uint32_t mdic = 0;
6680 	int i, rv;
6681 
6682 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6683 	    MDIC_REGADD(reg));
6684 
6685 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6686 		mdic = CSR_READ(sc, WMREG_MDIC);
6687 		if (mdic & MDIC_READY)
6688 			break;
6689 		delay(50);
6690 	}
6691 
6692 	if ((mdic & MDIC_READY) == 0) {
6693 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6694 		    device_xname(sc->sc_dev), phy, reg);
6695 		rv = 0;
6696 	} else if (mdic & MDIC_E) {
6697 #if 0 /* This is normal if no PHY is present. */
6698 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6699 		    device_xname(sc->sc_dev), phy, reg);
6700 #endif
6701 		rv = 0;
6702 	} else {
6703 		rv = MDIC_DATA(mdic);
6704 		if (rv == 0xffff)
6705 			rv = 0;
6706 	}
6707 
6708 	return rv;
6709 }
6710 
6711 /*
6712  * wm_gmii_i82544_writereg:	[mii interface function]
6713  *
6714  *	Write a PHY register on the GMII.
6715  */
6716 static void
6717 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6718 {
6719 	struct wm_softc *sc = device_private(self);
6720 	uint32_t mdic = 0;
6721 	int i;
6722 
6723 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6724 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6725 
6726 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6727 		mdic = CSR_READ(sc, WMREG_MDIC);
6728 		if (mdic & MDIC_READY)
6729 			break;
6730 		delay(50);
6731 	}
6732 
6733 	if ((mdic & MDIC_READY) == 0)
6734 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6735 		    device_xname(sc->sc_dev), phy, reg);
6736 	else if (mdic & MDIC_E)
6737 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6738 		    device_xname(sc->sc_dev), phy, reg);
6739 }
6740 
6741 /*
6742  * wm_gmii_i80003_readreg:	[mii interface function]
6743  *
6744  *	Read a PHY register on the Kumeran interface (80003).
6745  * This could be handled by the PHY layer if we didn't have to lock the
6746  * resource ...
6747  */
6748 static int
6749 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6750 {
6751 	struct wm_softc *sc = device_private(self);
6752 	int sem;
6753 	int rv;
6754 
6755 	if (phy != 1) /* only one PHY on kumeran bus */
6756 		return 0;
6757 
6758 	sem = swfwphysem[sc->sc_funcid];
6759 	if (wm_get_swfw_semaphore(sc, sem)) {
6760 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6761 		    __func__);
6762 		return 0;
6763 	}
6764 
6765 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6766 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6767 		    reg >> GG82563_PAGE_SHIFT);
6768 	} else {
6769 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6770 		    reg >> GG82563_PAGE_SHIFT);
6771 	}
6772 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6773 	delay(200);
6774 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6775 	delay(200);
6776 
6777 	wm_put_swfw_semaphore(sc, sem);
6778 	return rv;
6779 }
6780 
6781 /*
6782  * wm_gmii_i80003_writereg:	[mii interface function]
6783  *
6784  *	Write a PHY register on the Kumeran interface (80003).
6785  * This could be handled by the PHY layer if we didn't have to lock the
6786  * resource ...
6787  */
6788 static void
6789 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6790 {
6791 	struct wm_softc *sc = device_private(self);
6792 	int sem;
6793 
6794 	if (phy != 1) /* only one PHY on kumeran bus */
6795 		return;
6796 
6797 	sem = swfwphysem[sc->sc_funcid];
6798 	if (wm_get_swfw_semaphore(sc, sem)) {
6799 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6800 		    __func__);
6801 		return;
6802 	}
6803 
6804 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6805 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6806 		    reg >> GG82563_PAGE_SHIFT);
6807 	} else {
6808 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6809 		    reg >> GG82563_PAGE_SHIFT);
6810 	}
6811 	/* Wait another 200us to work around a bug in the MDIC ready bit */
6812 	delay(200);
6813 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6814 	delay(200);
6815 
6816 	wm_put_swfw_semaphore(sc, sem);
6817 }
6818 
6819 /*
6820  * wm_gmii_bm_readreg:	[mii interface function]
6821  *
6822  *	Read a PHY register on the BM PHY (82567).
6823  * This could be handled by the PHY layer if we didn't have to lock the
6824  * resource ...
6825  */
6826 static int
6827 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6828 {
6829 	struct wm_softc *sc = device_private(self);
6830 	int sem;
6831 	int rv;
6832 
6833 	sem = swfwphysem[sc->sc_funcid];
6834 	if (wm_get_swfw_semaphore(sc, sem)) {
6835 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6836 		    __func__);
6837 		return 0;
6838 	}
6839 
6840 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6841 		if (phy == 1)
6842 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6843 			    reg);
6844 		else
6845 			wm_gmii_i82544_writereg(self, phy,
6846 			    GG82563_PHY_PAGE_SELECT,
6847 			    reg >> GG82563_PAGE_SHIFT);
6848 	}
6849 
6850 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6851 	wm_put_swfw_semaphore(sc, sem);
6852 	return rv;
6853 }
6854 
6855 /*
6856  * wm_gmii_bm_writereg:	[mii interface function]
6857  *
6858  *	Write a PHY register on the BM PHY (82567).
6859  * This could be handled by the PHY layer if we didn't have to lock the
6860  * resource ...
6861  */
6862 static void
6863 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
6864 {
6865 	struct wm_softc *sc = device_private(self);
6866 	int sem;
6867 
6868 	sem = swfwphysem[sc->sc_funcid];
6869 	if (wm_get_swfw_semaphore(sc, sem)) {
6870 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6871 		    __func__);
6872 		return;
6873 	}
6874 
6875 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
6876 		if (phy == 1)
6877 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
6878 			    reg);
6879 		else
6880 			wm_gmii_i82544_writereg(self, phy,
6881 			    GG82563_PHY_PAGE_SELECT,
6882 			    reg >> GG82563_PAGE_SHIFT);
6883 	}
6884 
6885 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6886 	wm_put_swfw_semaphore(sc, sem);
6887 }
6888 
6889 static void
6890 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
6891 {
6892 	struct wm_softc *sc = device_private(self);
6893 	uint16_t regnum = BM_PHY_REG_NUM(offset);
6894 	uint16_t wuce;
6895 
6896 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
6897 	if (sc->sc_type == WM_T_PCH) {
6898 		/* XXX The e1000 driver does nothing here... why? */
6899 	}
6900 
6901 	/* Set page 769 */
6902 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6903 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6904 
6905 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
6906 
6907 	wuce &= ~BM_WUC_HOST_WU_BIT;
6908 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
6909 	    wuce | BM_WUC_ENABLE_BIT);
6910 
6911 	/* Select page 800 */
6912 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6913 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
6914 
6915 	/* Write page 800 */
6916 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
6917 
6918 	if (rd)
6919 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
6920 	else
6921 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
6922 
6923 	/* Set page 769 */
6924 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6925 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
6926 
6927 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
6928 }
6929 
6930 /*
6931  * wm_gmii_hv_readreg:	[mii interface function]
6932  *
6933  *	Read a PHY register on the HV PHY (82577 and PCH variants).
6934  * This could be handled by the PHY layer if we didn't have to lock the
6935  * resource ...
6936  */
6937 static int
6938 wm_gmii_hv_readreg(device_t self, int phy, int reg)
6939 {
6940 	struct wm_softc *sc = device_private(self);
6941 	uint16_t page = BM_PHY_REG_PAGE(reg);
6942 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6943 	uint16_t val;
6944 	int rv;
6945 
6946 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6947 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6948 		    __func__);
6949 		return 0;
6950 	}
6951 
6952 	/* XXX Workaround failure in MDIO access while cable is disconnected */
6953 	if (sc->sc_phytype == WMPHY_82577) {
6954 		/* XXX must write */
6955 	}
6956 
6957 	/* Page 800 works differently from the rest; it has its own function */
6958 	if (page == BM_WUC_PAGE) {
6959 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
6960 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak it */
6960 		return val;
6961 	}
6962 
6963 	/*
6964 	 * Pages lower than 768 work differently from the rest and would
6965 	 * need their own function, which is not implemented here.
6966 	 */
6967 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
6968 		printf("gmii_hv_readreg!!!\n");
6969 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6969 		return 0;
6970 	}
6971 
6972 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
6973 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
6974 		    page << BME1000_PAGE_SHIFT);
6975 	}
6976 
6977 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
6978 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
6979 	return rv;
6980 }
6981 
6982 /*
6983  * wm_gmii_hv_writereg:	[mii interface function]
6984  *	Write a PHY register on the HV PHY (82577 and PCH variants).
6985  * This could be handled by the PHY layer if we didn't have to lock the
6986  * resource ...
6987  * ressource ...
6988  */
6989 static void
6990 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
6991 {
6992 	struct wm_softc *sc = device_private(self);
6993 	uint16_t page = BM_PHY_REG_PAGE(reg);
6994 	uint16_t regnum = BM_PHY_REG_NUM(reg);
6995 
6996 	if (wm_get_swfw_semaphore(sc, SWFW_PHY0_SM)) {
6997 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6998 		    __func__);
6999 		return;
7000 	}
7001 
7002 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7003 
7004 	/* Page 800 works differently from the rest; it has its own function */
7005 	if (page == BM_WUC_PAGE) {
7006 		uint16_t tmp;
7007 
7008 		tmp = val;
7009 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7010 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM); /* don't leak it */
7010 		return;
7011 	}
7012 
7013 	/*
7014 	 * Pages lower than 768 work differently from the rest and would
7015 	 * need their own function, which is not implemented here.
7016 	 */
7017 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7018 		printf("gmii_hv_writereg!!!\n");
7019 		wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7019 		return;
7020 	}
7021 
7022 	/*
7023 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
7024 	 * Power Down (whenever bit 11 of the PHY control register is set)
7025 	 */
7026 
7027 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7028 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7029 		    page << BME1000_PAGE_SHIFT);
7030 	}
7031 
7032 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7033 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7034 }
7035 
7036 /*
7037  * wm_sgmii_readreg:	[mii interface function]
7038  *
7039  *	Read a PHY register on the SGMII.
7040  * This could be handled by the PHY layer if we didn't have to lock the
7041  * resource ...
7042  */
7043 static int
7044 wm_sgmii_readreg(device_t self, int phy, int reg)
7045 {
7046 	struct wm_softc *sc = device_private(self);
7047 	uint32_t i2ccmd;
7048 	int i, rv;
7049 
7050 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7051 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7052 		    __func__);
7053 		return 0;
7054 	}
7055 
7056 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7057 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7058 	    | I2CCMD_OPCODE_READ;
7059 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7060 
7061 	/* Poll the ready bit */
7062 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7063 		delay(50);
7064 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7065 		if (i2ccmd & I2CCMD_READY)
7066 			break;
7067 	}
7068 	if ((i2ccmd & I2CCMD_READY) == 0)
7069 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7070 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7071 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7072 
7073 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7074 
7075 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7076 	return rv;
7077 }
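
/*
 * Byte-order note: the I2CCMD data field comes back big-endian, so the
 * swap above converts it to host order; e.g. a raw data field of
 * 0xaabb is returned to the caller as 0xbbaa.
 */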
7078 
7079 /*
7080  * wm_sgmii_writereg:	[mii interface function]
7081  *
7082  *	Write a PHY register on the SGMII.
7083  * This could be handled by the PHY layer if we didn't have to lock the
7084  * resource ...
7085  */
7086 static void
7087 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7088 {
7089 	struct wm_softc *sc = device_private(self);
7090 	uint32_t i2ccmd;
7091 	int i;
7092 
7093 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7094 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7095 		    __func__);
7096 		return;
7097 	}
7098 
7099 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7100 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7101 	    | I2CCMD_OPCODE_WRITE;
7102 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7103 
7104 	/* Poll the ready bit */
7105 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7106 		delay(50);
7107 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7108 		if (i2ccmd & I2CCMD_READY)
7109 			break;
7110 	}
7111 	if ((i2ccmd & I2CCMD_READY) == 0)
7112 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7113 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7114 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7115 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7117 }
7118 
7119 /*
7120  * wm_gmii_82580_readreg:	[mii interface function]
7121  *
7122  *	Read a PHY register on the 82580 and I350.
7123  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7125  */
7126 static int
7127 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7128 {
7129 	struct wm_softc *sc = device_private(self);
7130 	int sem;
7131 	int rv;
7132 
7133 	sem = swfwphysem[sc->sc_funcid];
7134 	if (wm_get_swfw_semaphore(sc, sem)) {
7135 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7136 		    __func__);
7137 		return 0;
7138 	}
7139 
7140 	rv = wm_gmii_i82544_readreg(self, phy, reg);
7141 
7142 	wm_put_swfw_semaphore(sc, sem);
7143 	return rv;
7144 }
7145 
7146 /*
7147  * wm_gmii_82580_writereg:	[mii interface function]
7148  *
7149  *	Write a PHY register on the 82580 and I350.
7150  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7152  */
7153 static void
7154 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7155 {
7156 	struct wm_softc *sc = device_private(self);
7157 	int sem;
7158 
7159 	sem = swfwphysem[sc->sc_funcid];
7160 	if (wm_get_swfw_semaphore(sc, sem)) {
7161 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7162 		    __func__);
7163 		return;
7164 	}
7165 
7166 	wm_gmii_i82544_writereg(self, phy, reg, val);
7167 
7168 	wm_put_swfw_semaphore(sc, sem);
7169 }
7170 
7171 /*
7172  * wm_gmii_statchg:	[mii interface function]
7173  *
7174  *	Callback from MII layer when media changes.
7175  */
7176 static void
7177 wm_gmii_statchg(struct ifnet *ifp)
7178 {
7179 	struct wm_softc *sc = ifp->if_softc;
7180 	struct mii_data *mii = &sc->sc_mii;
7181 
7182 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7183 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7184 	sc->sc_fcrtl &= ~FCRTL_XONE;
7185 
7186 	/*
7187 	 * Get flow control negotiation result.
7188 	 */
7189 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7190 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7191 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7192 		mii->mii_media_active &= ~IFM_ETH_FMASK;
7193 	}
7194 
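	/*
	 * Apply the negotiated pause settings to the MAC: TFCE (together
	 * with the XON threshold) enables sending pause frames, RFCE
	 * enables honouring received ones.
	 */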
7195 	if (sc->sc_flowflags & IFM_FLOW) {
7196 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7197 			sc->sc_ctrl |= CTRL_TFCE;
7198 			sc->sc_fcrtl |= FCRTL_XONE;
7199 		}
7200 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7201 			sc->sc_ctrl |= CTRL_RFCE;
7202 	}
7203 
7204 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7205 		DPRINTF(WM_DEBUG_LINK,
7206 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7207 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7208 	} else {
7209 		DPRINTF(WM_DEBUG_LINK,
7210 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7211 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7212 	}
7213 
7214 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7215 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7216 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7217 						 : WMREG_FCRTL, sc->sc_fcrtl);
7218 	if (sc->sc_type == WM_T_80003) {
7219 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7220 		case IFM_1000_T:
7221 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7222 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7223 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7224 			break;
7225 		default:
7226 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7227 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7228 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
7229 			break;
7230 		}
7231 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7232 	}
7233 }
7234 
7235 /*
7236  * wm_kmrn_readreg:
7237  *
7238  *	Read a kumeran register
7239  */
7240 static int
7241 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7242 {
7243 	int rv;
7244 
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7246 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7247 			aprint_error_dev(sc->sc_dev,
7248 			    "%s: failed to get semaphore\n", __func__);
7249 			return 0;
7250 		}
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7252 		if (wm_get_swfwhw_semaphore(sc)) {
7253 			aprint_error_dev(sc->sc_dev,
7254 			    "%s: failed to get semaphore\n", __func__);
7255 			return 0;
7256 		}
7257 	}
7258 
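	/*
	 * Kumeran registers are reached indirectly through KUMCTRLSTA:
	 * write the register offset with the read-enable (REN) bit set,
	 * wait briefly, then pick up the data from the low 16 bits.
	 */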
7259 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7260 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7261 	    KUMCTRLSTA_REN);
7262 	delay(2);
7263 
7264 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7265 
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7269 		wm_put_swfwhw_semaphore(sc);
7270 
7271 	return rv;
7272 }
7273 
7274 /*
7275  * wm_kmrn_writereg:
7276  *
7277  *	Write a kumeran register
7278  */
7279 static void
7280 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7281 {
7282 
	if (sc->sc_flags & WM_F_SWFW_SYNC) {
7284 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7285 			aprint_error_dev(sc->sc_dev,
7286 			    "%s: failed to get semaphore\n", __func__);
7287 			return;
7288 		}
	} else if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
7290 		if (wm_get_swfwhw_semaphore(sc)) {
7291 			aprint_error_dev(sc->sc_dev,
7292 			    "%s: failed to get semaphore\n", __func__);
7293 			return;
7294 		}
7295 	}
7296 
7297 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7298 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7299 	    (val & KUMCTRLSTA_MASK));
7300 
	if (sc->sc_flags & WM_F_SWFW_SYNC)
		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if (sc->sc_flags & WM_F_SWFWHW_SYNC)
7304 		wm_put_swfwhw_semaphore(sc);
7305 }
7306 
7307 static int
7308 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7309 {
7310 	uint32_t eecd = 0;
7311 
7312 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7313 	    || sc->sc_type == WM_T_82583) {
7314 		eecd = CSR_READ(sc, WMREG_EECD);
7315 
7316 		/* Isolate bits 15 & 16 */
7317 		eecd = ((eecd >> 15) & 0x03);
7318 
7319 		/* If both bits are set, device is Flash type */
7320 		if (eecd == 0x03)
7321 			return 0;
7322 	}
7323 	return 1;
7324 }
7325 
7326 static int
7327 wm_get_swsm_semaphore(struct wm_softc *sc)
7328 {
7329 	int32_t timeout;
7330 	uint32_t swsm;
7331 
7332 	/* Get the FW semaphore. */
7333 	timeout = 1000 + 1; /* XXX */
7334 	while (timeout) {
7335 		swsm = CSR_READ(sc, WMREG_SWSM);
7336 		swsm |= SWSM_SWESMBI;
7337 		CSR_WRITE(sc, WMREG_SWSM, swsm);
7338 		/* if we managed to set the bit we got the semaphore. */
7339 		swsm = CSR_READ(sc, WMREG_SWSM);
7340 		if (swsm & SWSM_SWESMBI)
7341 			break;
7342 
7343 		delay(50);
7344 		timeout--;
7345 	}
7346 
7347 	if (timeout == 0) {
		aprint_error_dev(sc->sc_dev, "could not acquire SWSM semaphore\n");
7349 		/* Release semaphores */
7350 		wm_put_swsm_semaphore(sc);
7351 		return 1;
7352 	}
7353 	return 0;
7354 }
7355 
7356 static void
7357 wm_put_swsm_semaphore(struct wm_softc *sc)
7358 {
7359 	uint32_t swsm;
7360 
7361 	swsm = CSR_READ(sc, WMREG_SWSM);
7362 	swsm &= ~(SWSM_SWESMBI);
7363 	CSR_WRITE(sc, WMREG_SWSM, swsm);
7364 }
7365 
7366 static int
7367 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7368 {
7369 	uint32_t swfw_sync;
7370 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7371 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
7373 
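	/*
	 * SW_FW_SYNC holds a software-owned and a firmware-owned bit per
	 * resource; the resource is free only when both bits are clear.
	 * When present, the SWSM semaphore serializes our read-modify-write
	 * of SW_FW_SYNC itself.
	 */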
7374 	for (timeout = 0; timeout < 200; timeout++) {
7375 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7376 			if (wm_get_swsm_semaphore(sc)) {
7377 				aprint_error_dev(sc->sc_dev,
7378 				    "%s: failed to get semaphore\n",
7379 				    __func__);
7380 				return 1;
7381 			}
7382 		}
7383 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7384 		if ((swfw_sync & (swmask | fwmask)) == 0) {
7385 			swfw_sync |= swmask;
7386 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7387 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7388 				wm_put_swsm_semaphore(sc);
7389 			return 0;
7390 		}
7391 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7392 			wm_put_swsm_semaphore(sc);
7393 		delay(5000);
7394 	}
7395 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7396 	    device_xname(sc->sc_dev), mask, swfw_sync);
7397 	return 1;
7398 }
7399 
7400 static void
7401 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7402 {
7403 	uint32_t swfw_sync;
7404 
7405 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7406 		while (wm_get_swsm_semaphore(sc) != 0)
7407 			continue;
7408 	}
7409 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7410 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7411 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7412 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7413 		wm_put_swsm_semaphore(sc);
7414 }
7415 
7416 static int
7417 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7418 {
7419 	uint32_t ext_ctrl;
	int timeout;
7421 
7422 	for (timeout = 0; timeout < 200; timeout++) {
7423 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7424 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7425 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7426 
7427 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7428 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7429 			return 0;
7430 		delay(5000);
7431 	}
7432 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7433 	    device_xname(sc->sc_dev), ext_ctrl);
7434 	return 1;
7435 }
7436 
7437 static void
7438 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7439 {
7440 	uint32_t ext_ctrl;
7441 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7442 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7443 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7444 }
7445 
7446 static int
7447 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7448 {
7449 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7450 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7451 
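	/*
	 * On parts other than ICH10 and PCH the active flash bank is
	 * indicated by the EECD SEC1VAL bit; on ICH10 and PCH it has to be
	 * found by probing each bank for a valid NVM signature byte.
	 */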
7452 	if ((sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)) {
7453 		/* Value of bit 22 corresponds to the flash bank we're on. */
7454 		*bank = (CSR_READ(sc, WMREG_EECD) & EECD_SEC1VAL) ? 1 : 0;
7455 	} else {
7456 		uint8_t sig_byte;
7457 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
7458 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE)
7459 			*bank = 0;
7460 		else {
7461 			wm_read_ich8_byte(sc, act_offset + bank1_offset,
7462 			    &sig_byte);
7463 			if ((sig_byte & ICH_NVM_VALID_SIG_MASK)
7464 			    == ICH_NVM_SIG_VALUE)
7465 				*bank = 1;
7466 			else {
7467 				aprint_error_dev(sc->sc_dev,
7468 				    "EEPROM not present\n");
7469 				return -1;
7470 			}
7471 		}
7472 	}
7473 
7474 	return 0;
7475 }
7476 
7477 /******************************************************************************
7478  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7479  * register.
7480  *
7481  * sc - Struct containing variables accessed by shared code
7482  * offset - offset of word in the EEPROM to read
7483  * data - word read from the EEPROM
7484  * words - number of words to read
7485  *****************************************************************************/
7486 static int
7487 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7488 {
7489 	int32_t  error = 0;
7490 	uint32_t flash_bank = 0;
7491 	uint32_t act_offset = 0;
7492 	uint32_t bank_offset = 0;
7493 	uint16_t word = 0;
7494 	uint16_t i = 0;
7495 
7496 	/* We need to know which is the valid flash bank.  In the event
7497 	 * that we didn't allocate eeprom_shadow_ram, we may not be
7498 	 * managing flash_bank.  So it cannot be trusted and needs
7499 	 * to be updated with each read.
7500 	 */
7501 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7502 	if (error) {
7503 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7504 		    __func__);
7505 		return error;
7506 	}
7507 
7508 	/*
7509 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
7510 	 * size
7511 	 */
7512 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7513 
7514 	error = wm_get_swfwhw_semaphore(sc);
7515 	if (error) {
7516 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7517 		    __func__);
7518 		return error;
7519 	}
7520 
7521 	for (i = 0; i < words; i++) {
7522 		/* The NVM part needs a byte offset, hence * 2 */
7523 		act_offset = bank_offset + ((offset + i) * 2);
7524 		error = wm_read_ich8_word(sc, act_offset, &word);
7525 		if (error) {
7526 			aprint_error_dev(sc->sc_dev,
7527 			    "%s: failed to read NVM\n", __func__);
7528 			break;
7529 		}
7530 		data[i] = word;
7531 	}
7532 
7533 	wm_put_swfwhw_semaphore(sc);
7534 	return error;
7535 }
7536 
7537 /******************************************************************************
7538  * This function does initial flash setup so that a new read/write/erase cycle
7539  * can be started.
7540  *
7541  * sc - The pointer to the hw structure
7542  ****************************************************************************/
7543 static int32_t
7544 wm_ich8_cycle_init(struct wm_softc *sc)
7545 {
7546 	uint16_t hsfsts;
7547 	int32_t error = 1;
7548 	int32_t i     = 0;
7549 
7550 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7551 
	/* Check that the Flash Descriptor Valid bit is set in HW status */
7553 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7554 		return error;
7555 	}
7556 
	/* Clear FCERR and DAEL in HW status by writing 1s */
7559 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7560 
7561 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7562 
	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be changed in the hardware so that it is 1 after hardware
	 * reset, which can then be used as an indication whether a cycle is
	 * in progress or has been completed.  We should also have some
	 * software semaphore mechanism to guard FDONE or the cycle-in-
	 * progress bit so that two threads' accesses to those bits can be
	 * serialized, or a way so that two threads don't start the cycle at
	 * the same time.
	 */
7573 
7574 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7575 		/*
7576 		 * There is no cycle running at present, so we can start a
7577 		 * cycle
7578 		 */
7579 
7580 		/* Begin by setting Flash Cycle Done. */
7581 		hsfsts |= HSFSTS_DONE;
7582 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7583 		error = 0;
7584 	} else {
7585 		/*
7586 		 * otherwise poll for sometime so the current cycle has a
7587 		 * chance to end before giving up.
7588 		 */
7589 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7590 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7591 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7592 				error = 0;
7593 				break;
7594 			}
7595 			delay(1);
7596 		}
7597 		if (error == 0) {
			/*
			 * The previous cycle completed before our timeout;
			 * now set the Flash Cycle Done.
			 */
7602 			hsfsts |= HSFSTS_DONE;
7603 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7604 		}
7605 	}
7606 	return error;
7607 }
7608 
7609 /******************************************************************************
7610  * This function starts a flash cycle and waits for its completion
7611  *
7612  * sc - The pointer to the hw structure
7613  ****************************************************************************/
7614 static int32_t
7615 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7616 {
7617 	uint16_t hsflctl;
7618 	uint16_t hsfsts;
7619 	int32_t error = 1;
7620 	uint32_t i = 0;
7621 
7622 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7623 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7624 	hsflctl |= HSFCTL_GO;
7625 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7626 
7627 	/* wait till FDONE bit is set to 1 */
7628 	do {
7629 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7630 		if (hsfsts & HSFSTS_DONE)
7631 			break;
7632 		delay(1);
7633 		i++;
7634 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7636 		error = 0;
7637 
7638 	return error;
7639 }
7640 
7641 /******************************************************************************
7642  * Reads a byte or word from the NVM using the ICH8 flash access registers.
7643  *
7644  * sc - The pointer to the hw structure
7645  * index - The index of the byte or word to read.
7646  * size - Size of data to read, 1=byte 2=word
7647  * data - Pointer to the word to store the value read.
7648  *****************************************************************************/
7649 static int32_t
7650 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7651     uint32_t size, uint16_t* data)
7652 {
7653 	uint16_t hsfsts;
7654 	uint16_t hsflctl;
7655 	uint32_t flash_linear_address;
7656 	uint32_t flash_data = 0;
7657 	int32_t error = 1;
7658 	int32_t count = 0;
7659 
	if (size < 1 || size > 2 || data == NULL ||
7661 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7662 		return error;
7663 
7664 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7665 	    sc->sc_ich8_flash_base;
7666 
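	/*
	 * Attempt the read up to ICH_FLASH_CYCLE_REPEAT_COUNT times: a
	 * failed cycle is retried when the flash reports an error, while
	 * a cycle that never completes aborts the loop.
	 */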
7667 	do {
7668 		delay(1);
7669 		/* Steps */
7670 		error = wm_ich8_cycle_init(sc);
7671 		if (error)
7672 			break;
7673 
7674 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7675 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7676 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7677 		    & HSFCTL_BCOUNT_MASK;
7678 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7679 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7680 
7681 		/*
7682 		 * Write the last 24 bits of index into Flash Linear address
7683 		 * field in Flash Address
7684 		 */
		/* TODO: maybe check the index against the size of the flash */
7686 
7687 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7688 
7689 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7690 
		/*
		 * Check if FCERR is set to 1; if so, clear it and try
		 * the whole sequence a few more times.  Otherwise read in
		 * the Flash Data0 register, least significant byte first.
		 */
7697 		if (error == 0) {
7698 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7699 			if (size == 1)
7700 				*data = (uint8_t)(flash_data & 0x000000FF);
7701 			else if (size == 2)
7702 				*data = (uint16_t)(flash_data & 0x0000FFFF);
7703 			break;
7704 		} else {
7705 			/*
7706 			 * If we've gotten here, then things are probably
7707 			 * completely hosed, but if the error condition is
7708 			 * detected, it won't hurt to give it another try...
7709 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7710 			 */
7711 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7712 			if (hsfsts & HSFSTS_ERR) {
7713 				/* Repeat for some time before giving up. */
7714 				continue;
7715 			} else if ((hsfsts & HSFSTS_DONE) == 0)
7716 				break;
7717 		}
7718 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7719 
7720 	return error;
7721 }
7722 
7723 /******************************************************************************
7724  * Reads a single byte from the NVM using the ICH8 flash access registers.
7725  *
7726  * sc - pointer to wm_hw structure
7727  * index - The index of the byte to read.
7728  * data - Pointer to a byte to store the value read.
7729  *****************************************************************************/
7730 static int32_t
7731 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7732 {
7733 	int32_t status;
7734 	uint16_t word = 0;
7735 
7736 	status = wm_read_ich8_data(sc, index, 1, &word);
7737 	if (status == 0)
7738 		*data = (uint8_t)word;
7739 	else
7740 		*data = 0;
7741 
7742 	return status;
7743 }
7744 
7745 /******************************************************************************
7746  * Reads a word from the NVM using the ICH8 flash access registers.
7747  *
7748  * sc - pointer to wm_hw structure
7749  * index - The starting byte index of the word to read.
7750  * data - Pointer to a word to store the value read.
7751  *****************************************************************************/
7752 static int32_t
7753 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7754 {
7755 	int32_t status;
7756 
7757 	status = wm_read_ich8_data(sc, index, 2, data);
7758 	return status;
7759 }
7760 
7761 static int
7762 wm_check_mng_mode(struct wm_softc *sc)
7763 {
7764 	int rv;
7765 
7766 	switch (sc->sc_type) {
7767 	case WM_T_ICH8:
7768 	case WM_T_ICH9:
7769 	case WM_T_ICH10:
7770 	case WM_T_PCH:
7771 	case WM_T_PCH2:
7772 		rv = wm_check_mng_mode_ich8lan(sc);
7773 		break;
7774 	case WM_T_82574:
7775 	case WM_T_82583:
7776 		rv = wm_check_mng_mode_82574(sc);
7777 		break;
7778 	case WM_T_82571:
7779 	case WM_T_82572:
7780 	case WM_T_82573:
7781 	case WM_T_80003:
7782 		rv = wm_check_mng_mode_generic(sc);
7783 		break;
7784 	default:
		/* nothing to do */
7786 		rv = 0;
7787 		break;
7788 	}
7789 
7790 	return rv;
7791 }
7792 
7793 static int
7794 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
7795 {
7796 	uint32_t fwsm;
7797 
7798 	fwsm = CSR_READ(sc, WMREG_FWSM);
7799 
7800 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
7801 		return 1;
7802 
7803 	return 0;
7804 }
7805 
7806 static int
7807 wm_check_mng_mode_82574(struct wm_softc *sc)
7808 {
7809 	uint16_t data;
7810 
7811 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
7812 
7813 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
7814 		return 1;
7815 
7816 	return 0;
7817 }
7818 
7819 static int
7820 wm_check_mng_mode_generic(struct wm_softc *sc)
7821 {
7822 	uint32_t fwsm;
7823 
7824 	fwsm = CSR_READ(sc, WMREG_FWSM);
7825 
7826 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
7827 		return 1;
7828 
7829 	return 0;
7830 }
7831 
7832 static int
7833 wm_enable_mng_pass_thru(struct wm_softc *sc)
7834 {
7835 	uint32_t manc, fwsm, factps;
7836 
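	/*
	 * Management pass-through requires firmware that receives TCO
	 * packets and filters on the MAC address; beyond that, either the
	 * ARC subsystem must be in iAMT mode with the manageability clock
	 * not gated, or the management interface must be SMBus with ASF
	 * disabled.
	 */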
7837 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
7838 		return 0;
7839 
7840 	manc = CSR_READ(sc, WMREG_MANC);
7841 
7842 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
7843 		device_xname(sc->sc_dev), manc));
7844 	if (((manc & MANC_RECV_TCO_EN) == 0)
7845 	    || ((manc & MANC_EN_MAC_ADDR_FILTER) == 0))
7846 		return 0;
7847 
7848 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
7849 		fwsm = CSR_READ(sc, WMREG_FWSM);
7850 		factps = CSR_READ(sc, WMREG_FACTPS);
7851 		if (((factps & FACTPS_MNGCG) == 0)
7852 		    && ((fwsm & FWSM_MODE_MASK)
7853 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
7854 			return 1;
7855 	} else if (((manc & MANC_SMBUS_EN) != 0)
7856 	    && ((manc & MANC_ASF_EN) == 0))
7857 		return 1;
7858 
7859 	return 0;
7860 }
7861 
7862 static int
7863 wm_check_reset_block(struct wm_softc *sc)
7864 {
7865 	uint32_t reg;
7866 
7867 	switch (sc->sc_type) {
7868 	case WM_T_ICH8:
7869 	case WM_T_ICH9:
7870 	case WM_T_ICH10:
7871 	case WM_T_PCH:
7872 	case WM_T_PCH2:
7873 		reg = CSR_READ(sc, WMREG_FWSM);
7874 		if ((reg & FWSM_RSPCIPHY) != 0)
7875 			return 0;
7876 		else
7877 			return -1;
7878 		break;
7879 	case WM_T_82571:
7880 	case WM_T_82572:
7881 	case WM_T_82573:
7882 	case WM_T_82574:
7883 	case WM_T_82583:
7884 	case WM_T_80003:
7885 		reg = CSR_READ(sc, WMREG_MANC);
7886 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
7887 			return -1;
7888 		else
7889 			return 0;
7890 		break;
7891 	default:
7892 		/* no problem */
7893 		break;
7894 	}
7895 
7896 	return 0;
7897 }
7898 
7899 static void
7900 wm_get_hw_control(struct wm_softc *sc)
7901 {
7902 	uint32_t reg;
7903 
7904 	switch (sc->sc_type) {
7905 	case WM_T_82573:
7906 		reg = CSR_READ(sc, WMREG_SWSM);
7907 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
7908 		break;
7909 	case WM_T_82571:
7910 	case WM_T_82572:
7911 	case WM_T_82574:
7912 	case WM_T_82583:
7913 	case WM_T_80003:
7914 	case WM_T_ICH8:
7915 	case WM_T_ICH9:
7916 	case WM_T_ICH10:
7917 	case WM_T_PCH:
7918 	case WM_T_PCH2:
7919 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7920 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
7921 		break;
7922 	default:
7923 		break;
7924 	}
7925 }
7926 
7927 static void
7928 wm_release_hw_control(struct wm_softc *sc)
7929 {
7930 	uint32_t reg;
7931 
7932 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
7933 		return;
7934 
7935 	if (sc->sc_type == WM_T_82573) {
		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
7939 	} else {
7940 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7941 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
7942 	}
7943 }
7944 
7945 /* XXX Currently TBI only */
7946 static int
7947 wm_check_for_link(struct wm_softc *sc)
7948 {
7949 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7950 	uint32_t rxcw;
7951 	uint32_t ctrl;
7952 	uint32_t status;
7953 	uint32_t sig;
7954 
7955 	rxcw = CSR_READ(sc, WMREG_RXCW);
7956 	ctrl = CSR_READ(sc, WMREG_CTRL);
7957 	status = CSR_READ(sc, WMREG_STATUS);
7958 
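	/*
	 * Expected value of SWDPIN(1) when a signal is present: the sense
	 * of the signal-detect pin is inverted on the 82544 and earlier.
	 */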
7959 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
7960 
7961 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
7962 		device_xname(sc->sc_dev), __func__,
7963 		((ctrl & CTRL_SWDPIN(1)) == sig),
7964 		((status & STATUS_LU) != 0),
7965 		((rxcw & RXCW_C) != 0)
7966 		    ));
7967 
7968 	/*
7969 	 * SWDPIN   LU RXCW
7970 	 *      0    0    0
7971 	 *      0    0    1	(should not happen)
7972 	 *      0    1    0	(should not happen)
7973 	 *      0    1    1	(should not happen)
7974 	 *      1    0    0	Disable autonego and force linkup
7975 	 *      1    0    1	got /C/ but not linkup yet
7976 	 *      1    1    0	(linkup)
7977 	 *      1    1    1	If IFM_AUTO, back to autonego
7978 	 *
7979 	 */
7980 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
7981 	    && ((status & STATUS_LU) == 0)
7982 	    && ((rxcw & RXCW_C) == 0)) {
7983 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
7984 			__func__));
7985 		sc->sc_tbi_linkup = 0;
7986 		/* Disable auto-negotiation in the TXCW register */
7987 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
7988 
7989 		/*
7990 		 * Force link-up and also force full-duplex.
7991 		 *
7992 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
7993 		 * so we should update sc->sc_ctrl
7994 		 */
7995 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
7996 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7997 	} else if (((status & STATUS_LU) != 0)
7998 	    && ((rxcw & RXCW_C) != 0)
7999 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8000 		sc->sc_tbi_linkup = 1;
8001 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8002 			__func__));
8003 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8004 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8005 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8006 	    && ((rxcw & RXCW_C) != 0)) {
8007 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
8008 	} else {
8009 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8010 			status));
8011 	}
8012 
8013 	return 0;
8014 }
8015 
8016 /* Work-around for 82566 Kumeran PCS lock loss */
8017 static void
8018 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8019 {
8020 	int miistatus, active, i;
8021 	int reg;
8022 
8023 	miistatus = sc->sc_mii.mii_media_status;
8024 
8025 	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
8027 		return;
8028 
8029 	active = sc->sc_mii.mii_media_active;
8030 
8031 	/* Nothing to do if the link is other than 1Gbps */
8032 	if (IFM_SUBTYPE(active) != IFM_1000_T)
8033 		return;
8034 
8035 	for (i = 0; i < 10; i++) {
8036 		/* read twice */
8037 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8038 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8040 			goto out;	/* GOOD! */
8041 
8042 		/* Reset the PHY */
8043 		wm_gmii_reset(sc);
8044 		delay(5*1000);
8045 	}
8046 
8047 	/* Disable GigE link negotiation */
8048 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
8049 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8050 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8051 
8052 	/*
8053 	 * Call gig speed drop workaround on Gig disable before accessing
8054 	 * any PHY registers.
8055 	 */
8056 	wm_gig_downshift_workaround_ich8lan(sc);
8057 
8058 out:
8059 	return;
8060 }
8061 
/* Workaround for "WOL from S5 stops working" issue */
8063 static void
8064 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8065 {
8066 	uint16_t kmrn_reg;
8067 
8068 	/* Only for igp3 */
8069 	if (sc->sc_phytype == WMPHY_IGP_3) {
8070 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8071 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8072 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8073 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8074 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8075 	}
8076 }
8077 
8078 #ifdef WM_WOL
8079 /* Power down workaround on D3 */
8080 static void
8081 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8082 {
8083 	uint32_t reg;
8084 	int i;
8085 
8086 	for (i = 0; i < 2; i++) {
8087 		/* Disable link */
8088 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8089 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8090 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8091 
8092 		/*
8093 		 * Call gig speed drop workaround on Gig disable before
8094 		 * accessing any PHY registers
8095 		 */
8096 		if (sc->sc_type == WM_T_ICH8)
8097 			wm_gig_downshift_workaround_ich8lan(sc);
8098 
8099 		/* Write VR power-down enable */
8100 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8101 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8102 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8103 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8104 
8105 		/* Read it back and test */
8106 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8107 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8108 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8109 			break;
8110 
8111 		/* Issue PHY reset and repeat at most one more time */
8112 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8113 	}
8114 }
8115 #endif /* WM_WOL */
8116 
8117 /*
8118  * Workaround for pch's PHYs
8119  * XXX should be moved to new PHY driver?
8120  */
8121 static void
8122 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8123 {
8124 	if (sc->sc_phytype == WMPHY_82577)
8125 		wm_set_mdio_slow_mode_hv(sc);
8126 
8127 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8128 
	/* (82577 && (phy rev 1 or 2)) || (82578 && phy rev 1) */
8130 
8131 	/* 82578 */
8132 	if (sc->sc_phytype == WMPHY_82578) {
8133 		/* PCH rev. < 3 */
8134 		if (sc->sc_rev < 3) {
8135 			/* XXX 6 bit shift? Why? Is it page2? */
8136 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8137 			    0x66c0);
8138 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8139 			    0xffff);
8140 		}
8141 
8142 		/* XXX phy rev. < 2 */
8143 	}
8144 
8145 	/* Select page 0 */
8146 
8147 	/* XXX acquire semaphore */
8148 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8149 	/* XXX release semaphore */
8150 
8151 	/*
8152 	 * Configure the K1 Si workaround during phy reset assuming there is
8153 	 * link so that it disables K1 if link is in 1Gbps.
8154 	 */
8155 	wm_k1_gig_workaround_hv(sc, 1);
8156 }
8157 
8158 static void
8159 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8160 {
8161 
8162 	wm_set_mdio_slow_mode_hv(sc);
8163 }
8164 
8165 static void
8166 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8167 {
8168 	int k1_enable = sc->sc_nvm_k1_enabled;
8169 
8170 	/* XXX acquire semaphore */
8171 
8172 	if (link) {
8173 		k1_enable = 0;
8174 
8175 		/* Link stall fix for link up */
8176 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8177 	} else {
8178 		/* Link stall fix for link down */
8179 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8180 	}
8181 
8182 	wm_configure_k1_ich8lan(sc, k1_enable);
8183 
8184 	/* XXX release semaphore */
8185 }
8186 
8187 static void
8188 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8189 {
8190 	uint32_t reg;
8191 
8192 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8193 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8194 	    reg | HV_KMRN_MDIO_SLOW);
8195 }
8196 
8197 static void
8198 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8199 {
8200 	uint32_t ctrl, ctrl_ext, tmp;
8201 	uint16_t kmrn_reg;
8202 
8203 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8204 
8205 	if (k1_enable)
8206 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8207 	else
8208 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8209 
8210 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8211 
8212 	delay(20);
8213 
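	/*
	 * XXX Briefly force the MAC speed with SPD_BYPS set, then restore
	 * the original CTRL/CTRL_EXT values; this seems to be what makes
	 * the new K1 setting take effect.
	 */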
8214 	ctrl = CSR_READ(sc, WMREG_CTRL);
8215 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8216 
8217 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8218 	tmp |= CTRL_FRCSPD;
8219 
8220 	CSR_WRITE(sc, WMREG_CTRL, tmp);
8221 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8222 	delay(20);
8223 
8224 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
8225 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8226 	delay(20);
8227 }
8228 
8229 static void
8230 wm_smbustopci(struct wm_softc *sc)
8231 {
8232 	uint32_t fwsm;
8233 
8234 	fwsm = CSR_READ(sc, WMREG_FWSM);
8235 	if (((fwsm & FWSM_FW_VALID) == 0)
	    && (wm_check_reset_block(sc) == 0)) {
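		/*
		 * XXX Toggle the LANPHYPC pin: drive it low by setting
		 * OVERRIDE with VALUE cleared, then release it, to force
		 * the PHY out of SMBus mode and back onto the PCIe-based
		 * interface.
		 */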
8237 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8238 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8239 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8240 		delay(10);
8241 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8242 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8243 		delay(50*1000);
8244 
8245 		/*
8246 		 * Gate automatic PHY configuration by hardware on non-managed
8247 		 * 82579
8248 		 */
8249 		if (sc->sc_type == WM_T_PCH2)
8250 			wm_gate_hw_phy_config_ich8lan(sc, 1);
8251 	}
8252 }
8253 
8254 static void
8255 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8256 {
8257 	uint32_t gcr;
8258 	pcireg_t ctrl2;
8259 
8260 	gcr = CSR_READ(sc, WMREG_GCR);
8261 
8262 	/* Only take action if timeout value is defaulted to 0 */
8263 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8264 		goto out;
8265 
8266 	if ((gcr & GCR_CAP_VER2) == 0) {
8267 		gcr |= GCR_CMPL_TMOUT_10MS;
8268 		goto out;
8269 	}
8270 
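	/*
	 * On capability-version-2 parts the completion timeout is set
	 * through the PCIe Device Control 2 register rather than GCR.
	 */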
8271 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8272 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
8273 	ctrl2 |= WM_PCIE_DCSR2_16MS;
8274 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8275 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8276 
8277 out:
8278 	/* Disable completion timeout resend */
8279 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
8280 
8281 	CSR_WRITE(sc, WMREG_GCR, gcr);
8282 }
8283 
/* Special case - for the 82575 we need to do manual init ... */
8285 static void
8286 wm_reset_init_script_82575(struct wm_softc *sc)
8287 {
	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
	 */
8292 
8293 	/* SerDes configuration via SERDESCTRL */
8294 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8295 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8296 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8297 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8298 
8299 	/* CCM configuration via CCMCTL register */
8300 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8301 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8302 
8303 	/* PCIe lanes configuration */
8304 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8305 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8306 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8307 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8308 
8309 	/* PCIe PLL Configuration */
8310 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8311 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8312 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8313 }
8314 
8315 static void
8316 wm_init_manageability(struct wm_softc *sc)
8317 {
8318 
8319 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8320 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8321 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8322 
		/* disable hardware interception of ARP */
8324 		manc &= ~MANC_ARP_EN;
8325 
8326 		/* enable receiving management packets to the host */
8327 		if (sc->sc_type >= WM_T_82571) {
8328 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
8333 
8334 		CSR_WRITE(sc, WMREG_MANC, manc);
8335 	}
8336 }
8337 
8338 static void
8339 wm_release_manageability(struct wm_softc *sc)
8340 {
8341 
8342 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8343 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8344 
8345 		if (sc->sc_type >= WM_T_82571)
8346 			manc &= ~MANC_EN_MNG2HOST;
8347 
8348 		CSR_WRITE(sc, WMREG_MANC, manc);
8349 	}
8350 }
8351 
8352 static void
8353 wm_get_wakeup(struct wm_softc *sc)
8354 {
8355 
8356 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8357 	switch (sc->sc_type) {
8358 	case WM_T_82573:
8359 	case WM_T_82583:
8360 		sc->sc_flags |= WM_F_HAS_AMT;
8361 		/* FALLTHROUGH */
8362 	case WM_T_80003:
8363 	case WM_T_82541:
8364 	case WM_T_82547:
8365 	case WM_T_82571:
8366 	case WM_T_82572:
8367 	case WM_T_82574:
8368 	case WM_T_82575:
8369 	case WM_T_82576:
8370 #if 0 /* XXX */
8371 	case WM_T_82580:
8372 	case WM_T_82580ER:
8373 	case WM_T_I350:
8374 #endif
8375 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8376 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8377 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8378 		break;
8379 	case WM_T_ICH8:
8380 	case WM_T_ICH9:
8381 	case WM_T_ICH10:
8382 	case WM_T_PCH:
8383 	case WM_T_PCH2:
8384 		sc->sc_flags |= WM_F_HAS_AMT;
8385 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8386 		break;
8387 	default:
8388 		break;
8389 	}
8390 
8391 	/* 1: HAS_MANAGE */
8392 	if (wm_enable_mng_pass_thru(sc) != 0)
8393 		sc->sc_flags |= WM_F_HAS_MANAGE;
8394 
8395 #ifdef WM_DEBUG
8396 	printf("\n");
8397 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8398 		printf("HAS_AMT,");
8399 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8400 		printf("ARC_SUBSYS_VALID,");
8401 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8402 		printf("ASF_FIRMWARE_PRES,");
8403 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8404 		printf("HAS_MANAGE,");
8405 	printf("\n");
8406 #endif
	/*
	 * Note that the WOL flags are set after the EEPROM handling has
	 * been reset.
	 */
8411 }
8412 
8413 #ifdef WM_WOL
8414 /* WOL in the newer chipset interfaces (pchlan) */
8415 static void
8416 wm_enable_phy_wakeup(struct wm_softc *sc)
8417 {
8418 #if 0
8419 	uint16_t preg;
8420 
8421 	/* Copy MAC RARs to PHY RARs */
8422 
8423 	/* Copy MAC MTA to PHY MTA */
8424 
8425 	/* Configure PHY Rx Control register */
8426 
8427 	/* Enable PHY wakeup in MAC register */
8428 
8429 	/* Configure and enable PHY wakeup in PHY registers */
8430 
8431 	/* Activate PHY wakeup */
8432 
8433 	/* XXX */
8434 #endif
8435 }
8436 
8437 static void
8438 wm_enable_wakeup(struct wm_softc *sc)
8439 {
8440 	uint32_t reg, pmreg;
8441 	pcireg_t pmode;
8442 
8443 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8444 		&pmreg, NULL) == 0)
8445 		return;
8446 
8447 	/* Advertise the wakeup capability */
8448 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8449 	    | CTRL_SWDPIN(3));
8450 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8451 
8452 	/* ICH workaround */
8453 	switch (sc->sc_type) {
8454 	case WM_T_ICH8:
8455 	case WM_T_ICH9:
8456 	case WM_T_ICH10:
8457 	case WM_T_PCH:
8458 	case WM_T_PCH2:
8459 		/* Disable gig during WOL */
8460 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8461 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8462 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8463 		if (sc->sc_type == WM_T_PCH)
8464 			wm_gmii_reset(sc);
8465 
8466 		/* Power down workaround */
8467 		if (sc->sc_phytype == WMPHY_82577) {
8468 			struct mii_softc *child;
8469 
8470 			/* Assume that the PHY is copper */
8471 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
8472 			if (child->mii_mpd_rev <= 2)
8473 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8474 				    (768 << 5) | 25, 0x0444); /* magic num */
8475 		}
8476 		break;
8477 	default:
8478 		break;
8479 	}
8480 
8481 	/* Keep the laser running on fiber adapters */
	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8484 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8485 		reg |= CTRL_EXT_SWDPIN(3);
8486 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8487 	}
8488 
8489 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8490 #if 0	/* for the multicast packet */
8491 	reg |= WUFC_MC;
8492 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8493 #endif
8494 
8495 	if (sc->sc_type == WM_T_PCH) {
8496 		wm_enable_phy_wakeup(sc);
8497 	} else {
8498 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8499 		CSR_WRITE(sc, WMREG_WUFC, reg);
8500 	}
8501 
8502 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8503 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8504 		|| (sc->sc_type == WM_T_PCH2))
8505 		    && (sc->sc_phytype == WMPHY_IGP_3))
8506 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8507 
8508 	/* Request PME */
8509 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8510 #if 0
8511 	/* Disable WOL */
8512 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8513 #else
8514 	/* For WOL */
8515 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8516 #endif
8517 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8518 }
8519 #endif /* WM_WOL */
8520 
8521 static bool
8522 wm_suspend(device_t self, const pmf_qual_t *qual)
8523 {
8524 	struct wm_softc *sc = device_private(self);
8525 
8526 	wm_release_manageability(sc);
8527 	wm_release_hw_control(sc);
8528 #ifdef WM_WOL
8529 	wm_enable_wakeup(sc);
8530 #endif
8531 
8532 	return true;
8533 }
8534 
8535 static bool
8536 wm_resume(device_t self, const pmf_qual_t *qual)
8537 {
8538 	struct wm_softc *sc = device_private(self);
8539 
8540 	wm_init_manageability(sc);
8541 
8542 	return true;
8543 }
8544 
8545 static void
wm_set_eee_i350(struct wm_softc *sc)
8547 {
8548 	uint32_t ipcnfg, eeer;
8549 
8550 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8551 	eeer = CSR_READ(sc, WMREG_EEER);
8552 
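	/*
	 * IPCNFG advertises EEE at 100Mbps and 1Gbps during
	 * autonegotiation; EEER enables entering LPI on the transmit and
	 * receive paths.
	 */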
8553 	if ((sc->sc_flags & WM_F_EEE) != 0) {
8554 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8555 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8556 		    | EEER_LPI_FC);
8557 	} else {
8558 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8559 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8560 		    | EEER_LPI_FC);
8561 	}
8562 
8563 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8564 	CSR_WRITE(sc, WMREG_EEER, eeer);
8565 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8566 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8567 }
8568