/*	$NetBSD: if_wm.c,v 1.265 2013/12/29 21:28:41 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Rework how parameters are loaded from the EEPROM.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.265 2013/12/29 21:28:41 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>

#include <sys/rnd.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		0x01
#define	WM_DEBUG_TX		0x02
#define	WM_DEBUG_RX		0x04
#define	WM_DEBUG_GMII		0x08
#define	WM_DEBUG_MANAGE		0x10
#define	WM_DEBUG_NVM		0x20
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
#define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
#define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
#define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
#define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
#define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
#define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))

#define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */

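/*
 * Illustrative sketch (not compiled): because the ring and queue sizes
 * above are powers of two, WM_NEXTTX()/WM_NEXTTXS() can wrap indices
 * with a cheap AND of the mask instead of a modulo operation.
 */
#if 0	/* example only, assuming a 4096-entry 82544 ring */
	int mask = WM_NTXDESC_82544 - 1;	/* 4095 */
	int next = (4095 + 1) & mask;		/* == 0: wraps to ring start */
#endif
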
/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
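
/*
 * Worked example (assuming a ~9018-byte jumbo frame): each Rx buffer
 * holds MCLBYTES (2048) bytes, so such a frame spans
 * howmany(9018, 2048) == 5 buffers, and a 256-descriptor ring holds
 * roughly 256 / 5 == 51 of them -- hence the "room for 50" above.
 */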

/*
 * Control structures are DMA'd to the i82542 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct wm_control_data_82544 {
	/*
	 * The receive descriptors.
	 */
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];

	/*
	 * The transmit descriptors.  Put these at the end, because
	 * we might use a smaller number of them.
	 */
	union {
		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
	} wdc_u;
};

struct wm_control_data_82542 {
	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
};

#define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
#define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
#define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

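/* SW/FW semaphore bits for each PHY; indexed by the chip's function ID. */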
static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */

	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ih;			/* interrupt cookie */
	callout_t sc_tick_ch;		/* tick callout */

	int sc_ee_addrbits;		/* EEPROM address bits */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	int sc_txnum;			/* must be a power of two */
	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];

	/*
	 * Control data structures.
	 */
	int sc_ntxdesc;			/* must be a power of two */
	struct wm_control_data_82544 *sc_control_data;
	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
	bus_dma_segment_t sc_cd_seg;	/* control data segment */
	int sc_cd_rseg;			/* real number of control data segments */
	size_t sc_cd_size;		/* control data size */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
#define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
#define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
#define	sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */

	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */

	struct evcnt sc_ev_tu;		/* Tx underrun */

	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	bus_addr_t sc_tdt_reg;		/* offset of TDT register */

	int	sc_txfree;		/* number of free Tx descriptors */
	int	sc_txnext;		/* next ready Tx descriptor */

	int	sc_txsfree;		/* number of free Tx jobs */
	int	sc_txsnext;		/* next free Tx job */
	int	sc_txsdirty;		/* dirty Tx jobs */

	/* These 5 variables are used only on the 82547. */
	int	sc_txfifo_size;		/* Tx FIFO size */
	int	sc_txfifo_head;		/* current head of FIFO */
	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	bus_addr_t sc_rdt_reg;		/* offset of RDT register */

	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
	int	sc_rxdiscard;
	int	sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_anegticks;		/* autonegotiation ticks */
	int sc_tbi_ticks;		/* tbi ticks */
	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */
};

#define	WM_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
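
/*
 * Illustrative sketch (not compiled): sc_rxtailp always points at the
 * m_next field that the next received mbuf should be linked into, so
 * appending a buffer to a multi-buffer packet is O(1) and needs no
 * special case for the first mbuf in the chain.
 */
#if 0	/* example only */
static void
wm_rxchain_example(struct wm_softc *sc, struct mbuf *m1, struct mbuf *m2)
{
	WM_RXCHAIN_RESET(sc);		/* sc_rxhead == NULL */
	WM_RXCHAIN_LINK(sc, m1);	/* sc_rxhead == m1 */
	WM_RXCHAIN_LINK(sc, m2);	/* m1->m_next == m2 */
}
#endif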

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
#else
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */
#endif

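/*
 * Register access helpers.  CSR_WRITE_FLUSH() performs a harmless read
 * of the STATUS register to force any posted writes out to the chip.
 */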
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg) \
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE32(sc, reg, data) \
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define ICH8_FLASH_READ16(sc, reg) \
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
#define ICH8_FLASH_WRITE16(sc, reg, data) \
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))

#define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
#define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(sc, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)

#define	WM_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > WM_NTXDESC(sc)) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
		    (WM_NTXDESC(sc) - __x), (ops));			\
		__n -= (WM_NTXDESC(sc) - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
} while (/*CONSTCOND*/0)
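
/*
 * Illustrative sketch (not compiled): a sync that would run past the
 * end of the ring is split in two.  With a 4096-entry ring, syncing 4
 * descriptors starting at index 4094 syncs entries 4094-4095 first and
 * then entries 0-1.
 */
#if 0	/* example only */
	WM_CDTXSYNC(sc, 4094, 4, BUS_DMASYNC_PREWRITE);
#endif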

#define	WM_CDRXSYNC(sc, x, ops)						\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
} while (/*CONSTCOND*/0)

#define	WM_INIT_RXDESC(sc, x)						\
do {									\
	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K	\
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	wm_set_dma_addr(&__rxd->wrx_addr,				\
	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
	__rxd->wrx_len = 0;						\
	__rxd->wrx_cksum = 0;						\
	__rxd->wrx_status = 0;						\
	__rxd->wrx_errors = 0;						\
	__rxd->wrx_special = 0;						\
	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
} while (/*CONSTCOND*/0)
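
/*
 * Worked example for sc_align_tweak: an Ethernet header is 14 bytes,
 * so with a 2-byte tweak the IP header that follows it begins at
 * offset 16 within the buffer, i.e. on a 4-byte boundary.
 */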

static void	wm_start(struct ifnet *);
static void	wm_nq_start(struct ifnet *);
static void	wm_watchdog(struct ifnet *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
static int	wm_init(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);

static void	wm_reset(struct wm_softc *);
static void	wm_rxdrain(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_softc *, int);
static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
static int	wm_validate_eeprom_checksum(struct wm_softc *);
static int	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_tick(void *);

static void	wm_set_filter(struct wm_softc *);
static void	wm_set_vlan(struct wm_softc *);

static int	wm_intr(void *);
static void	wm_txintr(struct wm_softc *);
static void	wm_rxintr(struct wm_softc *);
static void	wm_linkintr(struct wm_softc *, uint32_t);

static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);

static void	wm_tbi_set_linkled(struct wm_softc *);
static void	wm_tbi_check_link(struct wm_softc *);

static void	wm_gmii_reset(struct wm_softc *);

static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);

static void	wm_gmii_statchg(struct ifnet *);

static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);

static int	wm_kmrn_readreg(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);

static void	wm_set_spiaddrbits(struct wm_softc *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *);
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_swfwhw_semaphore(struct wm_softc *);
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
		     uint32_t, uint16_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static void	wm_82547_txfifo_stall(void *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static int	wm_check_reset_block(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
#ifdef WM_WOL
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
#endif
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
static void	wm_init_manageability(struct wm_softc *);
static void	wm_set_eee_i350(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	int			wmp_flags;
#define	WMP_F_1000X		0x01
#define	WMP_F_1000T		0x02
#define	WMP_F_SERDES		0x04
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_1000X },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_1000T, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH9,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
#if 0
	/*
	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
	 * disabled for now ...
	 */
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_1000T },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580ER,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_1000X },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_1000T },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Connection",
	  WM_T_I354,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_1000X },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_SERDES },
#endif
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_1000T },
	{ 0,			0,
	  NULL,
	  0,			0 },
};

#ifdef WM_EVENT_COUNTERS
static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
#endif /* WM_EVENT_COUNTERS */

#if 0 /* Not currently used */
static inline uint32_t
wm_io_read(struct wm_softc *sc, int reg)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
}
#endif

static inline void
wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
}
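
/*
 * Illustrative sketch (not compiled): the I/O BAR is an address/data
 * window pair - the register offset is written at offset 0, and the
 * data is then read or written at offset 4.  For example, forcing a
 * device reset through I/O space might look like this (the CTRL_RST
 * bit is assumed from the register definitions in if_wmreg.h):
 */
#if 0	/* example only */
	wm_io_write(sc, WMREG_CTRL, CTRL_RST);
#endif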

static inline void
wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
    uint32_t data)
{
	uint32_t regval;
	int i;

	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);

	CSR_WRITE(sc, reg, regval);

	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
		delay(5);
		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
			break;
	}
	if (i == SCTL_CTL_POLL_TIMEOUT) {
		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
		    device_xname(sc->sc_dev), reg);
	}
}

static inline void
wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
{
	wa->wa_low = htole32(v & 0xffffffffU);
	if (sizeof(bus_addr_t) == 8)
		wa->wa_high = htole32((uint64_t) v >> 32);
	else
		wa->wa_high = 0;
}

static void
wm_set_spiaddrbits(struct wm_softc *sc)
{
	uint32_t reg;

	sc->sc_flags |= WM_F_EEPROM_SPI;
	reg = CSR_READ(sc, WMREG_EECD);
	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
}

static const struct wm_product *
wm_lookup(const struct pci_attach_args *pa)
{
	const struct wm_product *wmp;

	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
			return wmp;
	}
	return NULL;
}

static int
wm_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (wm_lookup(pa) != NULL)
		return 1;

	return 0;
}

static void
wm_attach(device_t parent, device_t self, void *aux)
{
	struct wm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	prop_dictionary_t dict;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	const char *eetype, *xname;
	bus_space_tag_t memt;
	bus_space_handle_t memh;
	bus_size_t memsize;
	int memh_valid;
	int i, error;
	const struct wm_product *wmp;
	prop_data_t ea;
	prop_number_t pn;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t cfg1, cfg2, swdpin, io3;
	pcireg_t preg, memtype;
	uint16_t eeprom_data, apme_mask;
	uint32_t reg;

	sc->sc_dev = self;
	callout_init(&sc->sc_tick_ch, 0);

	sc->sc_wmp = wmp = wm_lookup(pa);
	if (wmp == NULL) {
		printf("\n");
		panic("wm_attach: impossible");
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);

	sc->sc_type = wmp->wmp_type;
	if (sc->sc_type < WM_T_82543) {
		if (sc->sc_rev < 2) {
			aprint_error_dev(sc->sc_dev,
			    "i82542 must be at least rev. 2\n");
			return;
		}
		if (sc->sc_rev < 3)
			sc->sc_type = WM_T_82542_2_0;
	}

	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
		sc->sc_flags |= WM_F_NEWQUEUE;

	/* Set device properties (mactype) */
	dict = device_properties(sc->sc_dev);
	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);

	/*
	 * Map the device.  All devices support memory-mapped access,
	 * and it is really required for normal operation.
	 */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
	switch (memtype) {
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
		break;
	default:
		memh_valid = 0;
		break;
	}

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
		sc->sc_ss = memsize;
	} else {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * In addition, i82544 and later support I/O mapped indirect
	 * register access.  It is not desirable (nor supported in
	 * this driver) to use it for normal operation, though it is
	 * required to work around bugs in some chip versions.
	 */
	if (sc->sc_type >= WM_T_82544) {
		/* First we have to find the I/O BAR. */
		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
			if (memtype == PCI_MAPREG_TYPE_IO)
				break;
			if (PCI_MAPREG_MEM_TYPE(memtype) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				i += 4;	/* skip high bits, too */
		}
		if (i < PCI_MAPREG_END) {
			/*
			 * We found PCI_MAPREG_TYPE_IO.  Note that the
			 * 82580 (and newer?) chips have no I/O BAR, but
			 * that's not a problem because those newer chips
			 * don't have the bugs that require I/O access.
			 *
			 * The i8254x apparently doesn't respond when the
			 * I/O BAR is 0, which looks as if it hasn't been
			 * configured.
			 */
			preg = pci_conf_read(pc, pa->pa_tag, i);
			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: I/O BAR at zero.\n");
			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
					0, &sc->sc_iot, &sc->sc_ioh,
					NULL, &sc->sc_ios) == 0) {
				sc->sc_flags |= WM_F_IOH_VALID;
			} else {
				aprint_error_dev(sc->sc_dev,
				    "WARNING: unable to map I/O space\n");
			}
		}

	}

	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	if (sc->sc_type < WM_T_82542_2_1)
		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
	    NULL)) && error != EOPNOTSUPP) {
		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Check the function ID (unit number of the chip).
	 */
	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
	else
		sc->sc_funcid = 0;

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	if (sc->sc_type < WM_T_82543) {
		/* We don't really know the bus characteristics here. */
		sc->sc_bus_speed = 33;
	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
		/*
		 * CSA (Communication Streaming Architecture) is about as
		 * fast as a 32-bit 66MHz PCI bus.
		 */
		sc->sc_flags |= WM_F_CSA;
		sc->sc_bus_speed = 66;
		aprint_verbose_dev(sc->sc_dev,
		    "Communication Streaming Architecture\n");
		if (sc->sc_type == WM_T_82547) {
			callout_init(&sc->sc_txfifo_ch, 0);
			callout_setfunc(&sc->sc_txfifo_ch,
					wm_82547_txfifo_stall, sc);
			aprint_verbose_dev(sc->sc_dev,
			    "using 82547 Tx FIFO stall work-around\n");
		}
	} else if (sc->sc_type >= WM_T_82571) {
		sc->sc_flags |= WM_F_PCIE;
		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
		    && (sc->sc_type != WM_T_ICH10)
		    && (sc->sc_type != WM_T_PCH)
		    && (sc->sc_type != WM_T_PCH2)
		    && (sc->sc_type != WM_T_PCH_LPT)) {
			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
			/* ICH* and PCH* have no PCIe capability registers */
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
				NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIe capability\n");
		}
		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
	} else {
		reg = CSR_READ(sc, WMREG_STATUS);
		if (reg & STATUS_BUS64)
			sc->sc_flags |= WM_F_BUS64;
		if ((reg & STATUS_PCIX_MODE) != 0) {
			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;

			sc->sc_flags |= WM_F_PCIX;
			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
				aprint_error_dev(sc->sc_dev,
				    "unable to find PCIX capability\n");
			else if (sc->sc_type != WM_T_82545_3 &&
				 sc->sc_type != WM_T_82546_3) {
				/*
				 * Work around a problem caused by the BIOS
				 * setting the max memory read byte count
				 * incorrectly.
				 */
				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_CMD);
				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
				    sc->sc_pcixe_capoff + PCIX_STATUS);

				bytecnt =
				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
				    PCIX_CMD_BYTECNT_SHIFT;
				maxb =
				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
				    PCIX_STATUS_MAXB_SHIFT;
				if (bytecnt > maxb) {
					aprint_verbose_dev(sc->sc_dev,
					    "resetting PCI-X MMRBC: %d -> %d\n",
					    512 << bytecnt, 512 << maxb);
					pcix_cmd = (pcix_cmd &
					    ~PCIX_CMD_BYTECNT_MASK) |
					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
					pci_conf_write(pa->pa_pc, pa->pa_tag,
					    sc->sc_pcixe_capoff + PCIX_CMD,
					    pcix_cmd);
				}
			}
		}
		/*
		 * The quad port adapter is special; it has a PCIX-PCIX
		 * bridge on the board, and can run the secondary bus at
		 * a higher speed.
		 */
		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
								      : 66;
		} else if (sc->sc_flags & WM_F_PCIX) {
			switch (reg & STATUS_PCIXSPD_MASK) {
			case STATUS_PCIXSPD_50_66:
				sc->sc_bus_speed = 66;
				break;
			case STATUS_PCIXSPD_66_100:
				sc->sc_bus_speed = 100;
				break;
			case STATUS_PCIXSPD_100_133:
				sc->sc_bus_speed = 133;
				break;
			default:
				aprint_error_dev(sc->sc_dev,
				    "unknown PCIXSPD %d; assuming 66MHz\n",
				    reg & STATUS_PCIXSPD_MASK);
				sc->sc_bus_speed = 66;
				break;
			}
		} else
			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * NOTE: All Tx descriptors must be in the same 4G segment of
	 * memory.  So must Rx descriptors.  We simplify by allocating
	 * both sets within the same 4G segment (the 4G boundary argument
	 * to bus_dmamem_alloc() below enforces this).
	 */
	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
	    sizeof(struct wm_control_data_82542) :
	    sizeof(struct wm_control_data_82544);
	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
		    &sc->sc_cd_rseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
		    sc->sc_cd_rseg, sc->sc_cd_size,
		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	WM_TXQUEUELEN(sc) =
	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create Tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < WM_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, 0,
			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
1525 			    "unable to create Rx DMA map %d error = %d\n",
1526 			    i, error);
1527 			goto fail_5;
1528 		}
1529 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1530 	}
1531 
1532 	/* clear interesting stat counters */
1533 	CSR_READ(sc, WMREG_COLC);
1534 	CSR_READ(sc, WMREG_RXERRC);
1535 
1536 	/* get PHY control from SMBus to PCIe */
1537 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1538 	    || (sc->sc_type == WM_T_PCH_LPT))
1539 		wm_smbustopci(sc);
1540 
1541 	/*
1542 	 * Reset the chip to a known state.
1543 	 */
1544 	wm_reset(sc);
1545 
1546 	/*
1547 	 * Get some information about the EEPROM.
1548 	 */
1549 	switch (sc->sc_type) {
1550 	case WM_T_82542_2_0:
1551 	case WM_T_82542_2_1:
1552 	case WM_T_82543:
1553 	case WM_T_82544:
1554 		/* Microwire */
1555 		sc->sc_ee_addrbits = 6;
1556 		break;
1557 	case WM_T_82540:
1558 	case WM_T_82545:
1559 	case WM_T_82545_3:
1560 	case WM_T_82546:
1561 	case WM_T_82546_3:
1562 		/* Microwire */
1563 		reg = CSR_READ(sc, WMREG_EECD);
1564 		if (reg & EECD_EE_SIZE)
1565 			sc->sc_ee_addrbits = 8;
1566 		else
1567 			sc->sc_ee_addrbits = 6;
1568 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1569 		break;
1570 	case WM_T_82541:
1571 	case WM_T_82541_2:
1572 	case WM_T_82547:
1573 	case WM_T_82547_2:
1574 		reg = CSR_READ(sc, WMREG_EECD);
1575 		if (reg & EECD_EE_TYPE) {
1576 			/* SPI */
1577 			wm_set_spiaddrbits(sc);
1578 		} else
1579 			/* Microwire */
1580 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1581 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1582 		break;
1583 	case WM_T_82571:
1584 	case WM_T_82572:
1585 		/* SPI */
1586 		wm_set_spiaddrbits(sc);
1587 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1588 		break;
1589 	case WM_T_82573:
1590 	case WM_T_82574:
1591 	case WM_T_82583:
1592 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1593 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1594 		else {
1595 			/* SPI */
1596 			wm_set_spiaddrbits(sc);
1597 		}
1598 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1599 		break;
1600 	case WM_T_82575:
1601 	case WM_T_82576:
1602 	case WM_T_82580:
1603 	case WM_T_82580ER:
1604 	case WM_T_I350:
1605 	case WM_T_I354: /* XXXX ok? */
1606 	case WM_T_80003:
1607 		/* SPI */
1608 		wm_set_spiaddrbits(sc);
1609 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1610 		break;
1611 	case WM_T_ICH8:
1612 	case WM_T_ICH9:
1613 	case WM_T_ICH10:
1614 	case WM_T_PCH:
1615 	case WM_T_PCH2:
1616 	case WM_T_PCH_LPT:
1617 		/* FLASH */
1618 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1619 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1620 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1621 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1622 			aprint_error_dev(sc->sc_dev,
1623 			    "can't map FLASH registers\n");
1624 			return;
1625 		}
1626 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1627 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1628 						ICH_FLASH_SECTOR_SIZE;
1629 		sc->sc_ich8_flash_bank_size =
1630 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1631 		sc->sc_ich8_flash_bank_size -=
1632 		    (reg & ICH_GFPREG_BASE_MASK);
1633 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1634 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
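		/*
		 * Worked example, assuming a 4KB ICH_FLASH_SECTOR_SIZE:
		 * GFPREG reporting base = 1 and limit = 8 describes
		 * 8 sectors (32KB); dividing by 2 * sizeof(uint16_t)
		 * yields 8192, the size of one of the two NVM banks in
		 * 16-bit words.
		 */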
1635 		break;
1636 	case WM_T_I210:
1637 	case WM_T_I211:
1638 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1639 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1640 		break;
1641 	default:
1642 		break;
1643 	}
1644 
1645 	/*
1646 	 * Defer printing the EEPROM type until after verifying the checksum.
1647 	 * This allows the EEPROM type to be printed correctly in the case
1648 	 * that no EEPROM is attached.
1649 	 */
1650 	/*
1651 	 * Validate the EEPROM checksum. If the checksum fails, flag
1652 	 * this for later, so we can fail future reads from the EEPROM.
1653 	 */
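	/*
	 * (For reference, assuming the traditional Intel NVM layout:
	 * the checksum passes when the 16-bit words 0x00-0x3f sum to
	 * the magic value 0xBABA.)
	 */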
1654 	if (wm_validate_eeprom_checksum(sc)) {
1655 		/*
1656 		 * Check a second time: some PCIe parts fail the first
1657 		 * read because the link is still in a sleep state.
1658 		 */
1659 		if (wm_validate_eeprom_checksum(sc))
1660 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1661 	}
1662 
1663 	/* Set device properties (macflags) */
1664 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1665 
1666 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1667 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1668 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1669 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1670 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1671 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1672 	} else {
1673 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1674 			eetype = "SPI";
1675 		else
1676 			eetype = "MicroWire";
1677 		aprint_verbose_dev(sc->sc_dev,
1678 		    "%u word (%d address bits) %s EEPROM\n",
1679 		    1U << sc->sc_ee_addrbits,
1680 		    sc->sc_ee_addrbits, eetype);
1681 	}
1682 
1683 	switch (sc->sc_type) {
1684 	case WM_T_82571:
1685 	case WM_T_82572:
1686 	case WM_T_82573:
1687 	case WM_T_82574:
1688 	case WM_T_82583:
1689 	case WM_T_80003:
1690 	case WM_T_ICH8:
1691 	case WM_T_ICH9:
1692 	case WM_T_ICH10:
1693 	case WM_T_PCH:
1694 	case WM_T_PCH2:
1695 	case WM_T_PCH_LPT:
1696 		if (wm_check_mng_mode(sc) != 0)
1697 			wm_get_hw_control(sc);
1698 		break;
1699 	default:
1700 		break;
1701 	}
1702 	wm_get_wakeup(sc);
1703 	/*
1704 	 * Read the Ethernet address from the EEPROM, if not first found
1705 	 * in device properties.
1706 	 */
1707 	ea = prop_dictionary_get(dict, "mac-address");
1708 	if (ea != NULL) {
1709 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1710 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1711 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1712 	} else {
1713 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1714 			aprint_error_dev(sc->sc_dev,
1715 			    "unable to read Ethernet address\n");
1716 			return;
1717 		}
1718 	}
1719 
1720 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1721 	    ether_sprintf(enaddr));
1722 
1723 	/*
1724 	 * Read the config info from the EEPROM, and set up various
1725 	 * bits in the control registers based on their contents.
1726 	 */
1727 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1728 	if (pn != NULL) {
1729 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1730 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1731 	} else {
1732 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1733 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1734 			return;
1735 		}
1736 	}
1737 
1738 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1739 	if (pn != NULL) {
1740 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1741 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1742 	} else {
1743 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1744 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1745 			return;
1746 		}
1747 	}
1748 
1749 	/* check for WM_F_WOL */
1750 	switch (sc->sc_type) {
1751 	case WM_T_82542_2_0:
1752 	case WM_T_82542_2_1:
1753 	case WM_T_82543:
1754 		/* dummy? */
1755 		eeprom_data = 0;
1756 		apme_mask = EEPROM_CFG3_APME;
1757 		break;
1758 	case WM_T_82544:
1759 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1760 		eeprom_data = cfg2;
1761 		break;
1762 	case WM_T_82546:
1763 	case WM_T_82546_3:
1764 	case WM_T_82571:
1765 	case WM_T_82572:
1766 	case WM_T_82573:
1767 	case WM_T_82574:
1768 	case WM_T_82583:
1769 	case WM_T_80003:
1770 	default:
1771 		apme_mask = EEPROM_CFG3_APME;
1772 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1773 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1774 		break;
1775 	case WM_T_82575:
1776 	case WM_T_82576:
1777 	case WM_T_82580:
1778 	case WM_T_82580ER:
1779 	case WM_T_I350:
1780 	case WM_T_I354: /* XXX ok? */
1781 	case WM_T_ICH8:
1782 	case WM_T_ICH9:
1783 	case WM_T_ICH10:
1784 	case WM_T_PCH:
1785 	case WM_T_PCH2:
1786 	case WM_T_PCH_LPT:
1787 		/* XXX The funcid should be checked on some devices */
1788 		apme_mask = WUC_APME;
1789 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1790 		break;
1791 	}
1792 
1793 	/* Check for the WM_F_WOL flag after the EEPROM settings read above */
1794 	if ((eeprom_data & apme_mask) != 0)
1795 		sc->sc_flags |= WM_F_WOL;
1796 #ifdef WM_DEBUG
1797 	if ((sc->sc_flags & WM_F_WOL) != 0)
1798 		printf("WOL\n");
1799 #endif
1800 
1801 	/*
1802 	 * XXX need special handling for some multiple-port cards
1803 	 * to disable a particular port.
1804 	 */
1805 
1806 	if (sc->sc_type >= WM_T_82544) {
1807 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1808 		if (pn != NULL) {
1809 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1810 			swdpin = (uint16_t) prop_number_integer_value(pn);
1811 		} else {
1812 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1813 				aprint_error_dev(sc->sc_dev,
1814 				    "unable to read SWDPIN\n");
1815 				return;
1816 			}
1817 		}
1818 	}
1819 
1820 	if (cfg1 & EEPROM_CFG1_ILOS)
1821 		sc->sc_ctrl |= CTRL_ILOS;
1822 	if (sc->sc_type >= WM_T_82544) {
1823 		sc->sc_ctrl |=
1824 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1825 		    CTRL_SWDPIO_SHIFT;
1826 		sc->sc_ctrl |=
1827 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1828 		    CTRL_SWDPINS_SHIFT;
1829 	} else {
1830 		sc->sc_ctrl |=
1831 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1832 		    CTRL_SWDPIO_SHIFT;
1833 	}
1834 
1835 #if 0
1836 	if (sc->sc_type >= WM_T_82544) {
1837 		if (cfg1 & EEPROM_CFG1_IPS0)
1838 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1839 		if (cfg1 & EEPROM_CFG1_IPS1)
1840 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1841 		sc->sc_ctrl_ext |=
1842 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1843 		    CTRL_EXT_SWDPIO_SHIFT;
1844 		sc->sc_ctrl_ext |=
1845 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1846 		    CTRL_EXT_SWDPINS_SHIFT;
1847 	} else {
1848 		sc->sc_ctrl_ext |=
1849 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1850 		    CTRL_EXT_SWDPIO_SHIFT;
1851 	}
1852 #endif
1853 
1854 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1855 #if 0
1856 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1857 #endif
1858 
1859 	/*
1860 	 * Set up some register offsets that are different between
1861 	 * the i82542 and the i82543 and later chips.
1862 	 */
1863 	if (sc->sc_type < WM_T_82543) {
1864 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1865 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1866 	} else {
1867 		sc->sc_rdt_reg = WMREG_RDT;
1868 		sc->sc_tdt_reg = WMREG_TDT;
1869 	}
1870 
1871 	if (sc->sc_type == WM_T_PCH) {
1872 		uint16_t val;
1873 
1874 		/* Save the NVM K1 bit setting */
1875 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1876 
1877 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1878 			sc->sc_nvm_k1_enabled = 1;
1879 		else
1880 			sc->sc_nvm_k1_enabled = 0;
1881 	}
1882 
1883 	/*
1884 	 * Determine whether we're in TBI, GMII or SGMII mode, and initialize the
1885 	 * media structures accordingly.
1886 	 */
1887 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1888 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1889 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1890 	    || sc->sc_type == WM_T_82573
1891 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1892 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1893 		wm_gmii_mediainit(sc, wmp->wmp_product);
1894 	} else if (sc->sc_type < WM_T_82543 ||
1895 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1896 		if (wmp->wmp_flags & WMP_F_1000T)
1897 			aprint_error_dev(sc->sc_dev,
1898 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1899 		wm_tbi_mediainit(sc);
1900 	} else {
1901 		switch (sc->sc_type) {
1902 		case WM_T_82575:
1903 		case WM_T_82576:
1904 		case WM_T_82580:
1905 		case WM_T_82580ER:
1906 		case WM_T_I350:
1907 		case WM_T_I354:
1908 		case WM_T_I210:
1909 		case WM_T_I211:
1910 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1911 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1912 			case CTRL_EXT_LINK_MODE_1000KX:
1913 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
1914 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1915 				    reg | CTRL_EXT_I2C_ENA);
1916 				panic("not supported yet\n");
1917 				break;
1918 			case CTRL_EXT_LINK_MODE_SGMII:
1919 				if (wm_sgmii_uses_mdio(sc)) {
1920 					aprint_verbose_dev(sc->sc_dev,
1921 					    "SGMII(MDIO)\n");
1922 					sc->sc_flags |= WM_F_SGMII;
1923 					wm_gmii_mediainit(sc,
1924 					    wmp->wmp_product);
1925 					break;
1926 				}
1927 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
1928 				/*FALLTHROUGH*/
1929 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1930 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
1931 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1932 				    reg | CTRL_EXT_I2C_ENA);
1933 				panic("not supported yet\n");
1934 				break;
1935 			case CTRL_EXT_LINK_MODE_GMII:
1936 			default:
1937 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1938 				    reg & ~CTRL_EXT_I2C_ENA);
1939 				wm_gmii_mediainit(sc, wmp->wmp_product);
1940 				break;
1941 			}
1942 			break;
1943 		default:
1944 			if (wmp->wmp_flags & WMP_F_1000X)
1945 				aprint_error_dev(sc->sc_dev,
1946 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1947 			wm_gmii_mediainit(sc, wmp->wmp_product);
1948 		}
1949 	}
1950 
1951 	ifp = &sc->sc_ethercom.ec_if;
1952 	xname = device_xname(sc->sc_dev);
1953 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1954 	ifp->if_softc = sc;
1955 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1956 	ifp->if_ioctl = wm_ioctl;
1957 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1958 		ifp->if_start = wm_nq_start;
1959 	else
1960 		ifp->if_start = wm_start;
1961 	ifp->if_watchdog = wm_watchdog;
1962 	ifp->if_init = wm_init;
1963 	ifp->if_stop = wm_stop;
1964 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1965 	IFQ_SET_READY(&ifp->if_snd);
1966 
1967 	/* Check for jumbo frame */
1968 	switch (sc->sc_type) {
1969 	case WM_T_82573:
1970 		/* XXX limited to 9234 if ASPM is disabled */
1971 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1972 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1973 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1974 		break;
1975 	case WM_T_82571:
1976 	case WM_T_82572:
1977 	case WM_T_82574:
1978 	case WM_T_82575:
1979 	case WM_T_82576:
1980 	case WM_T_82580:
1981 	case WM_T_82580ER:
1982 	case WM_T_I350:
1983 	case WM_T_I354: /* XXXX ok? */
1984 	case WM_T_I210:
1985 	case WM_T_I211:
1986 	case WM_T_80003:
1987 	case WM_T_ICH9:
1988 	case WM_T_ICH10:
1989 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1990 	case WM_T_PCH_LPT:
1991 		/* XXX limited to 9234 */
1992 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1993 		break;
1994 	case WM_T_PCH:
1995 		/* XXX limited to 4096 */
1996 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1997 		break;
1998 	case WM_T_82542_2_0:
1999 	case WM_T_82542_2_1:
2000 	case WM_T_82583:
2001 	case WM_T_ICH8:
2002 		/* No support for jumbo frame */
2003 		break;
2004 	default:
2005 		/* ETHER_MAX_LEN_JUMBO */
2006 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2007 		break;
2008 	}
2009 
2010 	/*
2011 	 * If we're an i82543 or greater, we can support VLANs.
2012 	 */
2013 	if (sc->sc_type >= WM_T_82543)
2014 		sc->sc_ethercom.ec_capabilities |=
2015 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2016 
2017 	/*
2018 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2019 	 * on i82543 and later.
2020 	 */
2021 	if (sc->sc_type >= WM_T_82543) {
2022 		ifp->if_capabilities |=
2023 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2024 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2025 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2026 		    IFCAP_CSUM_TCPv6_Tx |
2027 		    IFCAP_CSUM_UDPv6_Tx;
2028 	}
2029 
2030 	/*
2031 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2032 	 *
2033 	 *	82541GI (8086:1076) ... no
2034 	 *	82572EI (8086:10b9) ... yes
2035 	 */
2036 	if (sc->sc_type >= WM_T_82571) {
2037 		ifp->if_capabilities |=
2038 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2039 	}
2040 
2041 	/*
2042 	 * If we're an i82544 or greater (except i82547), we can do
2043 	 * TCP segmentation offload.
2044 	 */
2045 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2046 		ifp->if_capabilities |= IFCAP_TSOv4;
2047 	}
2048 
2049 	if (sc->sc_type >= WM_T_82571) {
2050 		ifp->if_capabilities |= IFCAP_TSOv6;
2051 	}
2052 
2053 	/*
2054 	 * Attach the interface.
2055 	 */
2056 	if_attach(ifp);
2057 	ether_ifattach(ifp, enaddr);
2058 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2059 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2060 
2061 #ifdef WM_EVENT_COUNTERS
2062 	/* Attach event counters. */
2063 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2064 	    NULL, xname, "txsstall");
2065 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2066 	    NULL, xname, "txdstall");
2067 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2068 	    NULL, xname, "txfifo_stall");
2069 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2070 	    NULL, xname, "txdw");
2071 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2072 	    NULL, xname, "txqe");
2073 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2074 	    NULL, xname, "rxintr");
2075 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2076 	    NULL, xname, "linkintr");
2077 
2078 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2079 	    NULL, xname, "rxipsum");
2080 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2081 	    NULL, xname, "rxtusum");
2082 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2083 	    NULL, xname, "txipsum");
2084 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2085 	    NULL, xname, "txtusum");
2086 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2087 	    NULL, xname, "txtusum6");
2088 
2089 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2090 	    NULL, xname, "txtso");
2091 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2092 	    NULL, xname, "txtso6");
2093 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2094 	    NULL, xname, "txtsopain");
2095 
2096 	for (i = 0; i < WM_NTXSEGS; i++) {
2097 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2098 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2099 		    NULL, xname, wm_txseg_evcnt_names[i]);
2100 	}
2101 
2102 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2103 	    NULL, xname, "txdrop");
2104 
2105 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2106 	    NULL, xname, "tu");
2107 
2108 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2109 	    NULL, xname, "tx_xoff");
2110 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2111 	    NULL, xname, "tx_xon");
2112 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2113 	    NULL, xname, "rx_xoff");
2114 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2115 	    NULL, xname, "rx_xon");
2116 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2117 	    NULL, xname, "rx_macctl");
2118 #endif /* WM_EVENT_COUNTERS */
2119 
2120 	if (pmf_device_register(self, wm_suspend, wm_resume))
2121 		pmf_class_network_register(self, ifp);
2122 	else
2123 		aprint_error_dev(self, "couldn't establish power handler\n");
2124 
2125 	return;
2126 
2127 	/*
2128 	 * Free any resources we've allocated during the failed attach
2129 	 * attempt.  Do this in reverse order and fall through.
2130 	 */
2131  fail_5:
2132 	for (i = 0; i < WM_NRXDESC; i++) {
2133 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2134 			bus_dmamap_destroy(sc->sc_dmat,
2135 			    sc->sc_rxsoft[i].rxs_dmamap);
2136 	}
2137  fail_4:
2138 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2139 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2140 			bus_dmamap_destroy(sc->sc_dmat,
2141 			    sc->sc_txsoft[i].txs_dmamap);
2142 	}
2143 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2144  fail_3:
2145 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2146  fail_2:
2147 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2148 	    sc->sc_cd_size);
2149  fail_1:
2150 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2151  fail_0:
2152 	return;
2153 }
2154 
2155 static int
2156 wm_detach(device_t self, int flags __unused)
2157 {
2158 	struct wm_softc *sc = device_private(self);
2159 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2160 	int i, s;
2161 
2162 	s = splnet();
2163 	/* Stop the interface. Callouts are stopped in it. */
2164 	wm_stop(ifp, 1);
2165 	splx(s);
2166 
2167 	pmf_device_deregister(self);
2168 
2169 	/* Tell the firmware about the release */
2170 	wm_release_manageability(sc);
2171 	wm_release_hw_control(sc);
2172 
2173 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2174 
2175 	/* Delete all remaining media. */
2176 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2177 
2178 	ether_ifdetach(ifp);
2179 	if_detach(ifp);
2180 
2182 	/* Unload RX dmamaps and free mbufs */
2183 	wm_rxdrain(sc);
2184 
2185 	/* Free the DMA maps; this mirrors the failure path at the end of wm_attach() */
2186 	for (i = 0; i < WM_NRXDESC; i++) {
2187 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2188 			bus_dmamap_destroy(sc->sc_dmat,
2189 			    sc->sc_rxsoft[i].rxs_dmamap);
2190 	}
2191 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2192 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2193 			bus_dmamap_destroy(sc->sc_dmat,
2194 			    sc->sc_txsoft[i].txs_dmamap);
2195 	}
2196 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2197 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2198 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2199 	    sc->sc_cd_size);
2200 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2201 
2202 	/* Disestablish the interrupt handler */
2203 	if (sc->sc_ih != NULL) {
2204 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2205 		sc->sc_ih = NULL;
2206 	}
2207 
2208 	/* Unmap the registers */
2209 	if (sc->sc_ss) {
2210 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2211 		sc->sc_ss = 0;
2212 	}
2213 
2214 	if (sc->sc_ios) {
2215 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2216 		sc->sc_ios = 0;
2217 	}
2218 
2219 	return 0;
2220 }
2221 
2222 /*
2223  * wm_tx_offload:
2224  *
2225  *	Set up TCP/IP checksumming parameters for the
2226  *	specified packet.
2227  */
2228 static int
2229 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2230     uint8_t *fieldsp)
2231 {
2232 	struct mbuf *m0 = txs->txs_mbuf;
2233 	struct livengood_tcpip_ctxdesc *t;
2234 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2235 	uint32_t ipcse;
2236 	struct ether_header *eh;
2237 	int offset, iphl;
2238 	uint8_t fields;
2239 
2240 	/*
2241 	 * XXX It would be nice if the mbuf pkthdr had offset
2242 	 * fields for the protocol headers.
2243 	 */
2244 
2245 	eh = mtod(m0, struct ether_header *);
2246 	switch (htons(eh->ether_type)) {
2247 	case ETHERTYPE_IP:
2248 	case ETHERTYPE_IPV6:
2249 		offset = ETHER_HDR_LEN;
2250 		break;
2251 
2252 	case ETHERTYPE_VLAN:
2253 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2254 		break;
2255 
2256 	default:
2257 		/*
2258 		 * Don't support this protocol or encapsulation.
2259 		 */
2260 		*fieldsp = 0;
2261 		*cmdp = 0;
2262 		return 0;
2263 	}
2264 
2265 	if ((m0->m_pkthdr.csum_flags &
2266 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2267 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2268 	} else {
2269 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2270 	}
2271 	ipcse = offset + iphl - 1;
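	/*
	 * ipcse is the inclusive offset of the last IP header byte:
	 * e.g. an untagged frame (offset 14) with a 20-byte IPv4
	 * header gives ipcse = 33.
	 */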
2272 
2273 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2274 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2275 	seg = 0;
2276 	fields = 0;
2277 
2278 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2279 		int hlen = offset + iphl;
2280 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2281 
2282 		if (__predict_false(m0->m_len <
2283 				    (hlen + sizeof(struct tcphdr)))) {
2284 			/*
2285 			 * TCP/IP headers are not in the first mbuf; we need
2286 			 * to do this the slow and painful way.  Let's just
2287 			 * hope this doesn't happen very often.
2288 			 */
2289 			struct tcphdr th;
2290 
2291 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2292 
2293 			m_copydata(m0, hlen, sizeof(th), &th);
2294 			if (v4) {
2295 				struct ip ip;
2296 
2297 				m_copydata(m0, offset, sizeof(ip), &ip);
2298 				ip.ip_len = 0;
2299 				m_copyback(m0,
2300 				    offset + offsetof(struct ip, ip_len),
2301 				    sizeof(ip.ip_len), &ip.ip_len);
2302 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2303 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2304 			} else {
2305 				struct ip6_hdr ip6;
2306 
2307 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2308 				ip6.ip6_plen = 0;
2309 				m_copyback(m0,
2310 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2311 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2312 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2313 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2314 			}
2315 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2316 			    sizeof(th.th_sum), &th.th_sum);
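			/*
			 * th_sum now holds only the pseudo-header sum,
			 * computed over a zeroed length field; the chip
			 * is expected to fold the real length of each
			 * segment back in as it performs TSO.
			 */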
2317 
2318 			hlen += th.th_off << 2;
2319 		} else {
2320 			/*
2321 			 * TCP/IP headers are in the first mbuf; we can do
2322 			 * this the easy way.
2323 			 */
2324 			struct tcphdr *th;
2325 
2326 			if (v4) {
2327 				struct ip *ip =
2328 				    (void *)(mtod(m0, char *) + offset);
2329 				th = (void *)(mtod(m0, char *) + hlen);
2330 
2331 				ip->ip_len = 0;
2332 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2333 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2334 			} else {
2335 				struct ip6_hdr *ip6 =
2336 				    (void *)(mtod(m0, char *) + offset);
2337 				th = (void *)(mtod(m0, char *) + hlen);
2338 
2339 				ip6->ip6_plen = 0;
2340 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2341 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2342 			}
2343 			hlen += th->th_off << 2;
2344 		}
2345 
2346 		if (v4) {
2347 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2348 			cmdlen |= WTX_TCPIP_CMD_IP;
2349 		} else {
2350 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2351 			ipcse = 0;
2352 		}
2353 		cmd |= WTX_TCPIP_CMD_TSE;
2354 		cmdlen |= WTX_TCPIP_CMD_TSE |
2355 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2356 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2357 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
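		/*
		 * Typical values: an option-less TCP/IPv4 frame has
		 * hlen = 14 + 20 + 20 = 54, and with the usual Ethernet
		 * MSS the descriptor advertises segsz = 1460.
		 */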
2358 	}
2359 
2360 	/*
2361 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2362 	 * offload feature, if we load the context descriptor, we
2363 	 * MUST provide valid values for IPCSS and TUCSS fields.
2364 	 */
2365 
2366 	ipcs = WTX_TCPIP_IPCSS(offset) |
2367 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2368 	    WTX_TCPIP_IPCSE(ipcse);
2369 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2370 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2371 		fields |= WTX_IXSM;
2372 	}
2373 
2374 	offset += iphl;
2375 
2376 	if (m0->m_pkthdr.csum_flags &
2377 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2378 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2379 		fields |= WTX_TXSM;
2380 		tucs = WTX_TCPIP_TUCSS(offset) |
2381 		    WTX_TCPIP_TUCSO(offset +
2382 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2383 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2384 	} else if ((m0->m_pkthdr.csum_flags &
2385 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2386 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2387 		fields |= WTX_TXSM;
2388 		tucs = WTX_TCPIP_TUCSS(offset) |
2389 		    WTX_TCPIP_TUCSO(offset +
2390 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2391 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2392 	} else {
2393 		/* Just initialize it to a valid TCP context. */
2394 		tucs = WTX_TCPIP_TUCSS(offset) |
2395 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2396 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2397 	}
2398 
2399 	/* Fill in the context descriptor. */
2400 	t = (struct livengood_tcpip_ctxdesc *)
2401 	    &sc->sc_txdescs[sc->sc_txnext];
2402 	t->tcpip_ipcs = htole32(ipcs);
2403 	t->tcpip_tucs = htole32(tucs);
2404 	t->tcpip_cmdlen = htole32(cmdlen);
2405 	t->tcpip_seg = htole32(seg);
2406 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2407 
2408 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2409 	txs->txs_ndesc++;
2410 
2411 	*cmdp = cmd;
2412 	*fieldsp = fields;
2413 
2414 	return 0;
2415 }
2416 
2417 static void
2418 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2419 {
2420 	struct mbuf *m;
2421 	int i;
2422 
2423 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2424 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2425 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2426 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2427 		    m->m_data, m->m_len, m->m_flags);
2428 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2429 	    i, i == 1 ? "" : "s");
2430 }
2431 
2432 /*
2433  * wm_82547_txfifo_stall:
2434  *
2435  *	Callout used to wait for the 82547 Tx FIFO to drain,
2436  *	reset the FIFO pointers, and restart packet transmission.
2437  */
2438 static void
2439 wm_82547_txfifo_stall(void *arg)
2440 {
2441 	struct wm_softc *sc = arg;
2442 	int s;
2443 
2444 	s = splnet();
2445 
2446 	if (sc->sc_txfifo_stall) {
2447 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2448 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2449 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2450 			/*
2451 			 * Packets have drained.  Stop transmitter, reset
2452 			 * FIFO pointers, restart transmitter, and kick
2453 			 * the packet queue.
2454 			 */
2455 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2456 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2457 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2458 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2459 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2460 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2461 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2462 			CSR_WRITE_FLUSH(sc);
2463 
2464 			sc->sc_txfifo_head = 0;
2465 			sc->sc_txfifo_stall = 0;
2466 			wm_start(&sc->sc_ethercom.ec_if);
2467 		} else {
2468 			/*
2469 			 * Still waiting for packets to drain; try again in
2470 			 * another tick.
2471 			 */
2472 			callout_schedule(&sc->sc_txfifo_ch, 1);
2473 		}
2474 	}
2475 
2476 	splx(s);
2477 }
2478 
2479 static void
2480 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2481 {
2482 	uint32_t reg;
2483 
2484 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2485 
2486 	if (on != 0)
2487 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2488 	else
2489 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2490 
2491 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2492 }
2493 
2494 /*
2495  * wm_82547_txfifo_bugchk:
2496  *
2497  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2498  *	prevent enqueueing a packet that would wrap around the end
2499 	 *	of the Tx FIFO ring buffer, otherwise the chip will croak.
2500  *
2501  *	We do this by checking the amount of space before the end
2502  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2503  *	the Tx FIFO, wait for all remaining packets to drain, reset
2504  *	the internal FIFO pointers to the beginning, and restart
2505  *	transmission on the interface.
2506  */
2507 #define	WM_FIFO_HDR		0x10
2508 #define	WM_82547_PAD_LEN	0x3e0
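/*
 * Worked example with the constants above: a full-size 1514-byte frame
 * rounds up to len = 0x600 once the FIFO header is added, so it stalls
 * the transmitter whenever no more than len - WM_82547_PAD_LEN = 0x220
 * bytes remain before the FIFO wraps.
 */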
2509 static int
2510 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2511 {
2512 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2513 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2514 
2515 	/* Just return if already stalled. */
2516 	if (sc->sc_txfifo_stall)
2517 		return 1;
2518 
2519 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2520 		/* Stall only occurs in half-duplex mode. */
2521 		goto send_packet;
2522 	}
2523 
2524 	if (len >= WM_82547_PAD_LEN + space) {
2525 		sc->sc_txfifo_stall = 1;
2526 		callout_schedule(&sc->sc_txfifo_ch, 1);
2527 		return 1;
2528 	}
2529 
2530  send_packet:
2531 	sc->sc_txfifo_head += len;
2532 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2533 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2534 
2535 	return 0;
2536 }
2537 
2538 /*
2539  * wm_start:		[ifnet interface function]
2540  *
2541  *	Start packet transmission on the interface.
2542  */
2543 static void
2544 wm_start(struct ifnet *ifp)
2545 {
2546 	struct wm_softc *sc = ifp->if_softc;
2547 	struct mbuf *m0;
2548 	struct m_tag *mtag;
2549 	struct wm_txsoft *txs;
2550 	bus_dmamap_t dmamap;
2551 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2552 	bus_addr_t curaddr;
2553 	bus_size_t seglen, curlen;
2554 	uint32_t cksumcmd;
2555 	uint8_t cksumfields;
2556 
2557 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2558 		return;
2559 
2560 	/*
2561 	 * Remember the previous number of free descriptors.
2562 	 */
2563 	ofree = sc->sc_txfree;
2564 
2565 	/*
2566 	 * Loop through the send queue, setting up transmit descriptors
2567 	 * until we drain the queue, or use up all available transmit
2568 	 * descriptors.
2569 	 */
2570 	for (;;) {
2571 		/* Grab a packet off the queue. */
2572 		IFQ_POLL(&ifp->if_snd, m0);
2573 		if (m0 == NULL)
2574 			break;
2575 
2576 		DPRINTF(WM_DEBUG_TX,
2577 		    ("%s: TX: have packet to transmit: %p\n",
2578 		    device_xname(sc->sc_dev), m0));
2579 
2580 		/* Get a work queue entry. */
2581 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2582 			wm_txintr(sc);
2583 			if (sc->sc_txsfree == 0) {
2584 				DPRINTF(WM_DEBUG_TX,
2585 				    ("%s: TX: no free job descriptors\n",
2586 					device_xname(sc->sc_dev)));
2587 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2588 				break;
2589 			}
2590 		}
2591 
2592 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2593 		dmamap = txs->txs_dmamap;
2594 
2595 		use_tso = (m0->m_pkthdr.csum_flags &
2596 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2597 
2598 		/*
2599 		 * So says the Linux driver:
2600 		 * The controller does a simple calculation to make sure
2601 		 * there is enough room in the FIFO before initiating the
2602 		 * DMA for each buffer.  The calc is:
2603 		 *	4 = ceil(buffer len / MSS)
2604 		 * To make sure we don't overrun the FIFO, adjust the max
2605 		 * buffer len if the MSS drops.
2606 		 */
2607 		dmamap->dm_maxsegsz =
2608 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2609 		    ? m0->m_pkthdr.segsz << 2
2610 		    : WTX_MAX_LEN;
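		/*
		 * E.g. with the common Ethernet MSS of 1460, each DMA
		 * buffer handed to the chip is capped at 4 * 1460 =
		 * 5840 bytes.
		 */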
2611 
2612 		/*
2613 		 * Load the DMA map.  If this fails, the packet either
2614 		 * didn't fit in the allotted number of segments, or we
2615 		 * were short on resources.  For the too-many-segments
2616 		 * case, we simply report an error and drop the packet,
2617 		 * since we can't sanely copy a jumbo packet to a single
2618 		 * buffer.
2619 		 */
2620 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2621 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2622 		if (error) {
2623 			if (error == EFBIG) {
2624 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2625 				log(LOG_ERR, "%s: Tx packet consumes too many "
2626 				    "DMA segments, dropping...\n",
2627 				    device_xname(sc->sc_dev));
2628 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2629 				wm_dump_mbuf_chain(sc, m0);
2630 				m_freem(m0);
2631 				continue;
2632 			}
2633 			/*
2634 			 * Short on resources, just stop for now.
2635 			 */
2636 			DPRINTF(WM_DEBUG_TX,
2637 			    ("%s: TX: dmamap load failed: %d\n",
2638 			    device_xname(sc->sc_dev), error));
2639 			break;
2640 		}
2641 
2642 		segs_needed = dmamap->dm_nsegs;
2643 		if (use_tso) {
2644 			/* For sentinel descriptor; see below. */
2645 			segs_needed++;
2646 		}
2647 
2648 		/*
2649 		 * Ensure we have enough descriptors free to describe
2650 		 * the packet.  Note, we always reserve one descriptor
2651 		 * at the end of the ring due to the semantics of the
2652 		 * TDT register, plus one more in the event we need
2653 		 * to load offload context.
2654 		 */
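		/*
		 * Hence the "- 2" in the check below: one slot reserved
		 * for TDT, one for a possible context descriptor.
		 */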
2655 		if (segs_needed > sc->sc_txfree - 2) {
2656 			/*
2657 			 * Not enough free descriptors to transmit this
2658 			 * packet.  We haven't committed anything yet,
2659 			 * so just unload the DMA map, put the packet
2660 			 * back on the queue, and punt.  Notify the upper
2661 			 * layer that there are no more slots left.
2662 			 */
2663 			DPRINTF(WM_DEBUG_TX,
2664 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2665 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2666 			    segs_needed, sc->sc_txfree - 1));
2667 			ifp->if_flags |= IFF_OACTIVE;
2668 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2669 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2670 			break;
2671 		}
2672 
2673 		/*
2674 		 * Check for 82547 Tx FIFO bug.  We need to do this
2675 		 * once we know we can transmit the packet, since we
2676 		 * do some internal FIFO space accounting here.
2677 		 */
2678 		if (sc->sc_type == WM_T_82547 &&
2679 		    wm_82547_txfifo_bugchk(sc, m0)) {
2680 			DPRINTF(WM_DEBUG_TX,
2681 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2682 			    device_xname(sc->sc_dev)));
2683 			ifp->if_flags |= IFF_OACTIVE;
2684 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2685 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2686 			break;
2687 		}
2688 
2689 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2690 
2691 		/*
2692 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2693 		 */
2694 
2695 		DPRINTF(WM_DEBUG_TX,
2696 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2697 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2698 
2699 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2700 
2701 		/*
2702 		 * Store a pointer to the packet so that we can free it
2703 		 * later.
2704 		 *
2705 		 * Initially, we consider the number of descriptors the
2706 		 * packet uses to be the number of DMA segments.  This may be
2707 		 * incremented by 1 if we do checksum offload (a descriptor
2708 		 * is used to set the checksum context).
2709 		 */
2710 		txs->txs_mbuf = m0;
2711 		txs->txs_firstdesc = sc->sc_txnext;
2712 		txs->txs_ndesc = segs_needed;
2713 
2714 		/* Set up offload parameters for this packet. */
2715 		if (m0->m_pkthdr.csum_flags &
2716 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2717 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2718 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2719 			if (wm_tx_offload(sc, txs, &cksumcmd,
2720 					  &cksumfields) != 0) {
2721 				/* Error message already displayed. */
2722 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2723 				continue;
2724 			}
2725 		} else {
2726 			cksumcmd = 0;
2727 			cksumfields = 0;
2728 		}
2729 
2730 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2731 
2732 		/* Sync the DMA map. */
2733 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2734 		    BUS_DMASYNC_PREWRITE);
2735 
2736 		/*
2737 		 * Initialize the transmit descriptor.
2738 		 */
2739 		for (nexttx = sc->sc_txnext, seg = 0;
2740 		     seg < dmamap->dm_nsegs; seg++) {
2741 			for (seglen = dmamap->dm_segs[seg].ds_len,
2742 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2743 			     seglen != 0;
2744 			     curaddr += curlen, seglen -= curlen,
2745 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2746 				curlen = seglen;
2747 
2748 				/*
2749 				 * So says the Linux driver:
2750 				 * Work around for premature descriptor
2751 				 * write-backs in TSO mode.  Append a
2752 				 * 4-byte sentinel descriptor.
2753 				 */
2754 				if (use_tso &&
2755 				    seg == dmamap->dm_nsegs - 1 &&
2756 				    curlen > 8)
2757 					curlen -= 4;
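				/*
				 * Shortening the last chunk leaves a
				 * 4-byte remainder in this segment, so
				 * the loop emits one extra descriptor:
				 * the sentinel accounted for in
				 * segs_needed above.
				 */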
2758 
2759 				wm_set_dma_addr(
2760 				    &sc->sc_txdescs[nexttx].wtx_addr,
2761 				    curaddr);
2762 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2763 				    htole32(cksumcmd | curlen);
2764 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2765 				    0;
2766 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2767 				    cksumfields;
2768 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2769 				lasttx = nexttx;
2770 
2771 				DPRINTF(WM_DEBUG_TX,
2772 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
2773 				     "len %#04zx\n",
2774 				    device_xname(sc->sc_dev), nexttx,
2775 				    (uint64_t)curaddr, curlen));
2776 			}
2777 		}
2778 
2779 		KASSERT(lasttx != -1);
2780 
2781 		/*
2782 		 * Set up the command byte on the last descriptor of
2783 		 * the packet.  If we're in the interrupt delay window,
2784 		 * delay the interrupt.
2785 		 */
2786 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2787 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2788 
2789 		/*
2790 		 * If VLANs are enabled and the packet has a VLAN tag, set
2791 		 * up the descriptor to encapsulate the packet for us.
2792 		 *
2793 		 * This is only valid on the last descriptor of the packet.
2794 		 */
2795 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2796 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2797 			    htole32(WTX_CMD_VLE);
2798 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2799 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2800 		}
2801 
2802 		txs->txs_lastdesc = lasttx;
2803 
2804 		DPRINTF(WM_DEBUG_TX,
2805 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2806 		    device_xname(sc->sc_dev),
2807 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2808 
2809 		/* Sync the descriptors we're using. */
2810 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2811 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2812 
2813 		/* Give the packet to the chip. */
2814 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2815 
2816 		DPRINTF(WM_DEBUG_TX,
2817 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2818 
2819 		DPRINTF(WM_DEBUG_TX,
2820 		    ("%s: TX: finished transmitting packet, job %d\n",
2821 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2822 
2823 		/* Advance the tx pointer. */
2824 		sc->sc_txfree -= txs->txs_ndesc;
2825 		sc->sc_txnext = nexttx;
2826 
2827 		sc->sc_txsfree--;
2828 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2829 
2830 		/* Pass the packet to any BPF listeners. */
2831 		bpf_mtap(ifp, m0);
2832 	}
2833 
2834 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2835 		/* No more slots; notify upper layer. */
2836 		ifp->if_flags |= IFF_OACTIVE;
2837 	}
2838 
2839 	if (sc->sc_txfree != ofree) {
2840 		/* Set a watchdog timer in case the chip flakes out. */
2841 		ifp->if_timer = 5;
2842 	}
2843 }
2844 
2845 /*
2846  * wm_nq_tx_offload:
2847  *
2848  *	Set up TCP/IP checksumming parameters for the
2849  *	specified packet, for NEWQUEUE devices
2850  */
2851 static int
2852 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2853     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2854 {
2855 	struct mbuf *m0 = txs->txs_mbuf;
2856 	struct m_tag *mtag;
2857 	uint32_t vl_len, mssidx, cmdc;
2858 	struct ether_header *eh;
2859 	int offset, iphl;
2860 
2861 	/*
2862 	 * XXX It would be nice if the mbuf pkthdr had offset
2863 	 * fields for the protocol headers.
2864 	 */
2865 	*cmdlenp = 0;
2866 	*fieldsp = 0;
2867 
2868 	eh = mtod(m0, struct ether_header *);
2869 	switch (htons(eh->ether_type)) {
2870 	case ETHERTYPE_IP:
2871 	case ETHERTYPE_IPV6:
2872 		offset = ETHER_HDR_LEN;
2873 		break;
2874 
2875 	case ETHERTYPE_VLAN:
2876 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2877 		break;
2878 
2879 	default:
2880 		/*
2881 		 * Don't support this protocol or encapsulation.
2882 		 */
2883 		*do_csum = false;
2884 		return 0;
2885 	}
2886 	*do_csum = true;
2887 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2888 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2889 
2890 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2891 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2892 
2893 	if ((m0->m_pkthdr.csum_flags &
2894 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2895 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2896 	} else {
2897 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2898 	}
2899 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2900 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
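	/*
	 * vl_len now packs the MAC and IP header lengths into their
	 * NQTXC_VLLEN_* bit-fields; e.g. an untagged Ethernet + IPv4
	 * frame yields MACLEN = 14 and IPLEN = 20.
	 */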
2901 
2902 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2903 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2904 		     << NQTXC_VLLEN_VLAN_SHIFT);
2905 		*cmdlenp |= NQTX_CMD_VLE;
2906 	}
2907 
2908 	mssidx = 0;
2909 
2910 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2911 		int hlen = offset + iphl;
2912 		int tcp_hlen;
2913 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2914 
2915 		if (__predict_false(m0->m_len <
2916 				    (hlen + sizeof(struct tcphdr)))) {
2917 			/*
2918 			 * TCP/IP headers are not in the first mbuf; we need
2919 			 * to do this the slow and painful way.  Let's just
2920 			 * hope this doesn't happen very often.
2921 			 */
2922 			struct tcphdr th;
2923 
2924 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2925 
2926 			m_copydata(m0, hlen, sizeof(th), &th);
2927 			if (v4) {
2928 				struct ip ip;
2929 
2930 				m_copydata(m0, offset, sizeof(ip), &ip);
2931 				ip.ip_len = 0;
2932 				m_copyback(m0,
2933 				    offset + offsetof(struct ip, ip_len),
2934 				    sizeof(ip.ip_len), &ip.ip_len);
2935 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2936 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2937 			} else {
2938 				struct ip6_hdr ip6;
2939 
2940 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2941 				ip6.ip6_plen = 0;
2942 				m_copyback(m0,
2943 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2944 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2945 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2946 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2947 			}
2948 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2949 			    sizeof(th.th_sum), &th.th_sum);
2950 
2951 			tcp_hlen = th.th_off << 2;
2952 		} else {
2953 			/*
2954 			 * TCP/IP headers are in the first mbuf; we can do
2955 			 * this the easy way.
2956 			 */
2957 			struct tcphdr *th;
2958 
2959 			if (v4) {
2960 				struct ip *ip =
2961 				    (void *)(mtod(m0, char *) + offset);
2962 				th = (void *)(mtod(m0, char *) + hlen);
2963 
2964 				ip->ip_len = 0;
2965 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2966 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2967 			} else {
2968 				struct ip6_hdr *ip6 =
2969 				    (void *)(mtod(m0, char *) + offset);
2970 				th = (void *)(mtod(m0, char *) + hlen);
2971 
2972 				ip6->ip6_plen = 0;
2973 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2974 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2975 			}
2976 			tcp_hlen = th->th_off << 2;
2977 		}
2978 		hlen += tcp_hlen;
2979 		*cmdlenp |= NQTX_CMD_TSE;
2980 
2981 		if (v4) {
2982 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2983 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2984 		} else {
2985 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2986 			*fieldsp |= NQTXD_FIELDS_TUXSM;
2987 		}
2988 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2989 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2990 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2991 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2992 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2993 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
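		/*
		 * mssidx now carries the MSS and the L4 header length;
		 * e.g. MSS = 1460 with an option-less TCP header gives
		 * L4LEN = 20.
		 */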
2994 	} else {
2995 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2996 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2997 	}
2998 
2999 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3000 		*fieldsp |= NQTXD_FIELDS_IXSM;
3001 		cmdc |= NQTXC_CMD_IP4;
3002 	}
3003 
3004 	if (m0->m_pkthdr.csum_flags &
3005 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3006 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
3007 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3008 			cmdc |= NQTXC_CMD_TCP;
3009 		} else {
3010 			cmdc |= NQTXC_CMD_UDP;
3011 		}
3012 		cmdc |= NQTXC_CMD_IP4;
3013 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3014 	}
3015 	if (m0->m_pkthdr.csum_flags &
3016 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3017 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3018 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3019 			cmdc |= NQTXC_CMD_TCP;
3020 		} else {
3021 			cmdc |= NQTXC_CMD_UDP;
3022 		}
3023 		cmdc |= NQTXC_CMD_IP6;
3024 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3025 	}
3026 
3027 	/* Fill in the context descriptor. */
3028 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3029 	    htole32(vl_len);
3030 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3031 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3032 	    htole32(cmdc);
3033 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3034 	    htole32(mssidx);
3035 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3036 	DPRINTF(WM_DEBUG_TX,
3037 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3038 	    sc->sc_txnext, 0, vl_len));
3039 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3040 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3041 	txs->txs_ndesc++;
3042 	return 0;
3043 }
3044 
3045 /*
3046  * wm_nq_start:		[ifnet interface function]
3047  *
3048  *	Start packet transmission on the interface for NEWQUEUE devices
3049  */
3050 static void
3051 wm_nq_start(struct ifnet *ifp)
3052 {
3053 	struct wm_softc *sc = ifp->if_softc;
3054 	struct mbuf *m0;
3055 	struct m_tag *mtag;
3056 	struct wm_txsoft *txs;
3057 	bus_dmamap_t dmamap;
3058 	int error, nexttx, lasttx = -1, seg, segs_needed;
3059 	bool do_csum, sent;
3060 
3061 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3062 		return;
3063 
3064 	sent = false;
3065 
3066 	/*
3067 	 * Loop through the send queue, setting up transmit descriptors
3068 	 * until we drain the queue, or use up all available transmit
3069 	 * descriptors.
3070 	 */
3071 	for (;;) {
3072 		/* Grab a packet off the queue. */
3073 		IFQ_POLL(&ifp->if_snd, m0);
3074 		if (m0 == NULL)
3075 			break;
3076 
3077 		DPRINTF(WM_DEBUG_TX,
3078 		    ("%s: TX: have packet to transmit: %p\n",
3079 		    device_xname(sc->sc_dev), m0));
3080 
3081 		/* Get a work queue entry. */
3082 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3083 			wm_txintr(sc);
3084 			if (sc->sc_txsfree == 0) {
3085 				DPRINTF(WM_DEBUG_TX,
3086 				    ("%s: TX: no free job descriptors\n",
3087 					device_xname(sc->sc_dev)));
3088 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3089 				break;
3090 			}
3091 		}
3092 
3093 		txs = &sc->sc_txsoft[sc->sc_txsnext];
3094 		dmamap = txs->txs_dmamap;
3095 
3096 		/*
3097 		 * Load the DMA map.  If this fails, the packet either
3098 		 * didn't fit in the allotted number of segments, or we
3099 		 * were short on resources.  For the too-many-segments
3100 		 * case, we simply report an error and drop the packet,
3101 		 * since we can't sanely copy a jumbo packet to a single
3102 		 * buffer.
3103 		 */
3104 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3105 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3106 		if (error) {
3107 			if (error == EFBIG) {
3108 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3109 				log(LOG_ERR, "%s: Tx packet consumes too many "
3110 				    "DMA segments, dropping...\n",
3111 				    device_xname(sc->sc_dev));
3112 				IFQ_DEQUEUE(&ifp->if_snd, m0);
3113 				wm_dump_mbuf_chain(sc, m0);
3114 				m_freem(m0);
3115 				continue;
3116 			}
3117 			/*
3118 			 * Short on resources, just stop for now.
3119 			 */
3120 			DPRINTF(WM_DEBUG_TX,
3121 			    ("%s: TX: dmamap load failed: %d\n",
3122 			    device_xname(sc->sc_dev), error));
3123 			break;
3124 		}
3125 
3126 		segs_needed = dmamap->dm_nsegs;
3127 
3128 		/*
3129 		 * Ensure we have enough descriptors free to describe
3130 		 * the packet.  Note, we always reserve one descriptor
3131 		 * at the end of the ring due to the semantics of the
3132 		 * TDT register, plus one more in the event we need
3133 		 * to load offload context.
3134 		 */
3135 		if (segs_needed > sc->sc_txfree - 2) {
3136 			/*
3137 			 * Not enough free descriptors to transmit this
3138 			 * packet.  We haven't committed anything yet,
3139 			 * so just unload the DMA map, put the packet
3140 			 * back on the queue, and punt.  Notify the upper
3141 			 * layer that there are no more slots left.
3142 			 */
3143 			DPRINTF(WM_DEBUG_TX,
3144 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
3145 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
3146 			    segs_needed, sc->sc_txfree - 1));
3147 			ifp->if_flags |= IFF_OACTIVE;
3148 			bus_dmamap_unload(sc->sc_dmat, dmamap);
3149 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3150 			break;
3151 		}
3152 
3153 		IFQ_DEQUEUE(&ifp->if_snd, m0);
3154 
3155 		/*
3156 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3157 		 */
3158 
3159 		DPRINTF(WM_DEBUG_TX,
3160 		    ("%s: TX: packet has %d (%d) DMA segments\n",
3161 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3162 
3163 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3164 
3165 		/*
3166 		 * Store a pointer to the packet so that we can free it
3167 		 * later.
3168 		 *
3169 		 * Initially, we consider the number of descriptors the
3170 		 * packet uses to be the number of DMA segments.  This may be
3171 		 * incremented by 1 if we do checksum offload (a descriptor
3172 		 * is used to set the checksum context).
3173 		 */
3174 		txs->txs_mbuf = m0;
3175 		txs->txs_firstdesc = sc->sc_txnext;
3176 		txs->txs_ndesc = segs_needed;
3177 
3178 		/* Set up offload parameters for this packet. */
3179 		uint32_t cmdlen, fields, dcmdlen;
3180 		if (m0->m_pkthdr.csum_flags &
3181 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
3182 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3183 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3184 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3185 			    &do_csum) != 0) {
3186 				/* Error message already displayed. */
3187 				bus_dmamap_unload(sc->sc_dmat, dmamap);
3188 				continue;
3189 			}
3190 		} else {
3191 			do_csum = false;
3192 			cmdlen = 0;
3193 			fields = 0;
3194 		}
3195 
3196 		/* Sync the DMA map. */
3197 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3198 		    BUS_DMASYNC_PREWRITE);
3199 
3200 		/*
3201 		 * Initialize the first transmit descriptor.
3202 		 */
3203 		nexttx = sc->sc_txnext;
3204 		if (!do_csum) {
3205 			/* setup a legacy descriptor */
3206 			wm_set_dma_addr(
3207 			    &sc->sc_txdescs[nexttx].wtx_addr,
3208 			    dmamap->dm_segs[0].ds_addr);
3209 			sc->sc_txdescs[nexttx].wtx_cmdlen =
3210 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3211 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3212 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3213 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3214 			    NULL) {
3215 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
3216 				    htole32(WTX_CMD_VLE);
3217 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3218 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3219 			} else {
3220 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3221 			}
3222 			dcmdlen = 0;
3223 		} else {
3224 			/* setup an advanced data descriptor */
3225 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3226 			    htole64(dmamap->dm_segs[0].ds_addr);
3227 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3228 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3229 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
3230 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3231 			    htole32(fields);
3232 			DPRINTF(WM_DEBUG_TX,
3233 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3234 			    device_xname(sc->sc_dev), nexttx,
3235 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
3236 			DPRINTF(WM_DEBUG_TX,
3237 			    ("\t 0x%08x%08x\n", fields,
3238 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3239 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3240 		}
3241 
3242 		lasttx = nexttx;
3243 		nexttx = WM_NEXTTX(sc, nexttx);
3244 		/*
3245 		 * Fill in the next descriptors.  The legacy and advanced
3246 		 * formats are laid out the same from here on.
3247 		 */
3248 		for (seg = 1; seg < dmamap->dm_nsegs;
3249 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3250 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3251 			    htole64(dmamap->dm_segs[seg].ds_addr);
3252 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3253 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3254 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3255 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3256 			lasttx = nexttx;
3257 
3258 			DPRINTF(WM_DEBUG_TX,
3259 			    ("%s: TX: desc %d: %#" PRIx64 ", "
3260 			     "len %#04zx\n",
3261 			    device_xname(sc->sc_dev), nexttx,
3262 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
3263 			    dmamap->dm_segs[seg].ds_len));
3264 		}
3265 
3266 		KASSERT(lasttx != -1);
3267 
3268 		/*
3269 		 * Set up the command byte on the last descriptor of
3270 		 * the packet.  If we're in the interrupt delay window,
3271 		 * delay the interrupt.
3272 		 */
3273 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3274 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
3275 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
3276 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
3277 
3278 		txs->txs_lastdesc = lasttx;
3279 
3280 		DPRINTF(WM_DEBUG_TX,
3281 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
3282 		    device_xname(sc->sc_dev),
3283 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3284 
3285 		/* Sync the descriptors we're using. */
3286 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3287 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3288 
3289 		/* Give the packet to the chip. */
3290 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3291 		sent = true;
3292 
3293 		DPRINTF(WM_DEBUG_TX,
3294 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3295 
3296 		DPRINTF(WM_DEBUG_TX,
3297 		    ("%s: TX: finished transmitting packet, job %d\n",
3298 		    device_xname(sc->sc_dev), sc->sc_txsnext));
3299 
3300 		/* Advance the tx pointer. */
3301 		sc->sc_txfree -= txs->txs_ndesc;
3302 		sc->sc_txnext = nexttx;
3303 
3304 		sc->sc_txsfree--;
3305 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3306 
3307 		/* Pass the packet to any BPF listeners. */
3308 		bpf_mtap(ifp, m0);
3309 	}
3310 
3311 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3312 		/* No more slots; notify upper layer. */
3313 		ifp->if_flags |= IFF_OACTIVE;
3314 	}
3315 
3316 	if (sent) {
3317 		/* Set a watchdog timer in case the chip flakes out. */
3318 		ifp->if_timer = 5;
3319 	}
3320 }
3321 
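/*
 * A minimal sketch (hypothetical names, not driver code) of the ring
 * accounting used by the transmit loop above: the producer keeps a
 * couple of descriptors spare, reserves a packet's worth up front, and
 * wraps the tail index, which is what WM_NEXTTX() does one step at a
 * time.
 */
#if 0 /* example only */
static int
ring_reserve(struct txring *r, int nseg)
{

	if (nseg > r->free - 2)
		return ENOBUFS;		/* requeue and retry later */
	r->free -= nseg;
	r->tail = (r->tail + nseg) % r->size;
	return 0;
}
#endif
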
3322 /*
3323  * wm_watchdog:		[ifnet interface function]
3324  *
3325  *	Watchdog timer handler.
3326  */
3327 static void
3328 wm_watchdog(struct ifnet *ifp)
3329 {
3330 	struct wm_softc *sc = ifp->if_softc;
3331 
3332 	/*
3333 	 * Since we're using delayed interrupts, sweep up
3334 	 * before we report an error.
3335 	 */
3336 	wm_txintr(sc);
3337 
3338 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
3339 #ifdef WM_DEBUG
3340 		int i, j;
3341 		struct wm_txsoft *txs;
3342 #endif
3343 		log(LOG_ERR,
3344 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3345 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3346 		    sc->sc_txnext);
3347 		ifp->if_oerrors++;
3348 #ifdef WM_DEBUG
3349 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3350 		    i = WM_NEXTTXS(sc, i)) {
3351 		    txs = &sc->sc_txsoft[i];
3352 		    printf("txs %d tx %d -> %d\n",
3353 			i, txs->txs_firstdesc, txs->txs_lastdesc);
3354 		    for (j = txs->txs_firstdesc; ;
3355 			j = WM_NEXTTX(sc, j)) {
3356 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3357 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3358 			printf("\t %#08x%08x\n",
3359 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3360 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3361 			if (j == txs->txs_lastdesc)
3362 				break;
3363 			}
3364 		}
3365 #endif
3366 		/* Reset the interface. */
3367 		(void) wm_init(ifp);
3368 	}
3369 
3370 	/* Try to get more packets going. */
3371 	ifp->if_start(ifp);
3372 }
3373 
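/*
 * Note on the watchdog above: the transmit path arms ifp->if_timer
 * (to 5 seconds here); the network stack decrements it once per second
 * and invokes the if_watchdog hook when it reaches zero, so a transmit
 * queue that never completes is swept up and the chip reinitialized.
 */
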
3374 static int
3375 wm_ifflags_cb(struct ethercom *ec)
3376 {
3377 	struct ifnet *ifp = &ec->ec_if;
3378 	struct wm_softc *sc = ifp->if_softc;
3379 	int change = ifp->if_flags ^ sc->sc_if_flags;
3380 
3381 	if (change != 0)
3382 		sc->sc_if_flags = ifp->if_flags;
3383 
3384 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3385 		return ENETRESET;
3386 
3387 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3388 		wm_set_filter(sc);
3389 
3390 	wm_set_vlan(sc);
3391 
3392 	return 0;
3393 }
3394 
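/*
 * On the ENETRESET return above: the ifflags callback signals the
 * generic ethernet layer that the pending flag change is too big to
 * apply incrementally, and the interface is then typically
 * reinitialized, roughly like this (schematic, error handling elided):
 */
#if 0 /* example only */
	if ((*ec->ec_ifflags_cb)(ec) == ENETRESET)
		error = (*ifp->if_init)(ifp);
#endif
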
3395 /*
3396  * wm_ioctl:		[ifnet interface function]
3397  *
3398  *	Handle control requests from the operator.
3399  */
3400 static int
3401 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3402 {
3403 	struct wm_softc *sc = ifp->if_softc;
3404 	struct ifreq *ifr = (struct ifreq *) data;
3405 	struct ifaddr *ifa = (struct ifaddr *)data;
3406 	struct sockaddr_dl *sdl;
3407 	int s, error;
3408 
3409 	s = splnet();
3410 
3411 	switch (cmd) {
3412 	case SIOCSIFMEDIA:
3413 	case SIOCGIFMEDIA:
3414 		/* Flow control requires full-duplex mode. */
3415 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3416 		    (ifr->ifr_media & IFM_FDX) == 0)
3417 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3418 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3419 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3420 				/* We can do both TXPAUSE and RXPAUSE. */
3421 				ifr->ifr_media |=
3422 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3423 			}
3424 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3425 		}
3426 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3427 		break;
3428 	case SIOCINITIFADDR:
3429 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3430 			sdl = satosdl(ifp->if_dl->ifa_addr);
3431 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3432 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3433 			/* unicast address is first multicast entry */
3434 			wm_set_filter(sc);
3435 			error = 0;
3436 			break;
3437 		}
3438 		/*FALLTHROUGH*/
3439 	default:
3440 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3441 			break;
3442 
3443 		error = 0;
3444 
3445 		if (cmd == SIOCSIFCAP)
3446 			error = (*ifp->if_init)(ifp);
3447 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3448 			;
3449 		else if (ifp->if_flags & IFF_RUNNING) {
3450 			/*
3451 			 * Multicast list has changed; set the hardware filter
3452 			 * accordingly.
3453 			 */
3454 			wm_set_filter(sc);
3455 		}
3456 		break;
3457 	}
3458 
3459 	/* Try to get more packets going. */
3460 	ifp->if_start(ifp);
3461 
3462 	splx(s);
3463 	return error;
3464 }
3465 
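/*
 * Note the splnet()/splx() bracket in wm_ioctl() above: the handler
 * runs at base IPL and raises to IPL_NET for the duration, so it
 * cannot race the interrupt-side code (wm_intr() and the tx/rx
 * helpers) over the same softc state.
 */
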
3466 /*
3467  * wm_intr:
3468  *
3469  *	Interrupt service routine.
3470  */
3471 static int
3472 wm_intr(void *arg)
3473 {
3474 	struct wm_softc *sc = arg;
3475 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3476 	uint32_t icr;
3477 	int handled = 0;
3478 
3479 	while (1 /* CONSTCOND */) {
3480 		icr = CSR_READ(sc, WMREG_ICR);
3481 		if ((icr & sc->sc_icr) == 0)
3482 			break;
3483 		rnd_add_uint32(&sc->rnd_source, icr);
3484 
3485 		handled = 1;
3486 
3487 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3488 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3489 			DPRINTF(WM_DEBUG_RX,
3490 			    ("%s: RX: got Rx intr 0x%08x\n",
3491 			    device_xname(sc->sc_dev),
3492 			    icr & (ICR_RXDMT0|ICR_RXT0)));
3493 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3494 		}
3495 #endif
3496 		wm_rxintr(sc);
3497 
3498 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3499 		if (icr & ICR_TXDW) {
3500 			DPRINTF(WM_DEBUG_TX,
3501 			    ("%s: TX: got TXDW interrupt\n",
3502 			    device_xname(sc->sc_dev)));
3503 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
3504 		}
3505 #endif
3506 		wm_txintr(sc);
3507 
3508 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3509 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3510 			wm_linkintr(sc, icr);
3511 		}
3512 
3513 		if (icr & ICR_RXO) {
3514 #if defined(WM_DEBUG)
3515 			log(LOG_WARNING, "%s: Receive overrun\n",
3516 			    device_xname(sc->sc_dev));
3517 #endif /* defined(WM_DEBUG) */
3518 		}
3519 	}
3520 
3521 	if (handled) {
3522 		/* Try to get more packets going. */
3523 		ifp->if_start(ifp);
3524 	}
3525 
3526 	return handled;
3527 }
3528 
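/*
 * The interrupt loop above relies on ICR being read-to-clear: each
 * CSR_READ(sc, WMREG_ICR) returns and clears the pending causes, so
 * the loop exits once no cause we enabled (sc->sc_icr) remains.  A
 * shared-interrupt handler of this shape looks schematically like
 * this (hypothetical helpers, example only):
 */
#if 0
	for (;;) {
		cause = read_and_clear(dev);
		if ((cause & dev->enabled) == 0)
			break;		/* not ours, or nothing left */
		service(dev, cause);
	}
#endif
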
3529 /*
3530  * wm_txintr:
3531  *
3532  *	Helper; handle transmit interrupts.
3533  */
3534 static void
3535 wm_txintr(struct wm_softc *sc)
3536 {
3537 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3538 	struct wm_txsoft *txs;
3539 	uint8_t status;
3540 	int i;
3541 
3542 	ifp->if_flags &= ~IFF_OACTIVE;
3543 
3544 	/*
3545 	 * Go through the Tx list and free mbufs for those
3546 	 * frames which have been transmitted.
3547 	 */
3548 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3549 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3550 		txs = &sc->sc_txsoft[i];
3551 
3552 		DPRINTF(WM_DEBUG_TX,
3553 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3554 
3555 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3556 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3557 
3558 		status =
3559 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3560 		if ((status & WTX_ST_DD) == 0) {
3561 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3562 			    BUS_DMASYNC_PREREAD);
3563 			break;
3564 		}
3565 
3566 		DPRINTF(WM_DEBUG_TX,
3567 		    ("%s: TX: job %d done: descs %d..%d\n",
3568 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3569 		    txs->txs_lastdesc));
3570 
3571 		/*
3572 		 * XXX We should probably be using the statistics
3573 		 * XXX registers, but I don't know if they exist
3574 		 * XXX on chips before the i82544.
3575 		 */
3576 
3577 #ifdef WM_EVENT_COUNTERS
3578 		if (status & WTX_ST_TU)
3579 			WM_EVCNT_INCR(&sc->sc_ev_tu);
3580 #endif /* WM_EVENT_COUNTERS */
3581 
3582 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
3583 			ifp->if_oerrors++;
3584 			if (status & WTX_ST_LC)
3585 				log(LOG_WARNING, "%s: late collision\n",
3586 				    device_xname(sc->sc_dev));
3587 			else if (status & WTX_ST_EC) {
3588 				ifp->if_collisions += 16;
3589 				log(LOG_WARNING, "%s: excessive collisions\n",
3590 				    device_xname(sc->sc_dev));
3591 			}
3592 		} else
3593 			ifp->if_opackets++;
3594 
3595 		sc->sc_txfree += txs->txs_ndesc;
3596 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3597 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3598 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3599 		m_freem(txs->txs_mbuf);
3600 		txs->txs_mbuf = NULL;
3601 	}
3602 
3603 	/* Update the dirty transmit buffer pointer. */
3604 	sc->sc_txsdirty = i;
3605 	DPRINTF(WM_DEBUG_TX,
3606 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3607 
3608 	/*
3609 	 * If there are no more pending transmissions, cancel the watchdog
3610 	 * timer.
3611 	 */
3612 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3613 		ifp->if_timer = 0;
3614 }
3615 
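/*
 * The sync sequence in wm_txintr() above is the standard bus_dma(9)
 * pattern for peeking at a device-written descriptor: POSTREAD before
 * inspecting the status byte, then PREREAD again if the descriptor is
 * handed back to the device untouched.  In outline (example only):
 */
#if 0
	WM_CDTXSYNC(sc, first, n,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if ((status & WTX_ST_DD) == 0) {
		WM_CDTXSYNC(sc, last, 1, BUS_DMASYNC_PREREAD);
		break;			/* still owned by the chip */
	}
	/* ... reclaim the mbuf and descriptors ... */
#endif
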
3616 /*
3617  * wm_rxintr:
3618  *
3619  *	Helper; handle receive interrupts.
3620  */
3621 static void
3622 wm_rxintr(struct wm_softc *sc)
3623 {
3624 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3625 	struct wm_rxsoft *rxs;
3626 	struct mbuf *m;
3627 	int i, len;
3628 	uint8_t status, errors;
3629 	uint16_t vlantag;
3630 
3631 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3632 		rxs = &sc->sc_rxsoft[i];
3633 
3634 		DPRINTF(WM_DEBUG_RX,
3635 		    ("%s: RX: checking descriptor %d\n",
3636 		    device_xname(sc->sc_dev), i));
3637 
3638 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3639 
3640 		status = sc->sc_rxdescs[i].wrx_status;
3641 		errors = sc->sc_rxdescs[i].wrx_errors;
3642 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3643 		vlantag = sc->sc_rxdescs[i].wrx_special;
3644 
3645 		if ((status & WRX_ST_DD) == 0) {
3646 			/*
3647 			 * We have processed all of the receive descriptors.
3648 			 */
3649 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3650 			break;
3651 		}
3652 
3653 		if (__predict_false(sc->sc_rxdiscard)) {
3654 			DPRINTF(WM_DEBUG_RX,
3655 			    ("%s: RX: discarding contents of descriptor %d\n",
3656 			    device_xname(sc->sc_dev), i));
3657 			WM_INIT_RXDESC(sc, i);
3658 			if (status & WRX_ST_EOP) {
3659 				/* Reset our state. */
3660 				DPRINTF(WM_DEBUG_RX,
3661 				    ("%s: RX: resetting rxdiscard -> 0\n",
3662 				    device_xname(sc->sc_dev)));
3663 				sc->sc_rxdiscard = 0;
3664 			}
3665 			continue;
3666 		}
3667 
3668 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3669 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3670 
3671 		m = rxs->rxs_mbuf;
3672 
3673 		/*
3674 		 * Add a new receive buffer to the ring, unless of
3675 		 * course the length is zero. Treat the latter as a
3676 		 * failed mapping.
3677 		 */
3678 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3679 			/*
3680 			 * Failed, throw away what we've done so
3681 			 * far, and discard the rest of the packet.
3682 			 */
3683 			ifp->if_ierrors++;
3684 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3685 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3686 			WM_INIT_RXDESC(sc, i);
3687 			if ((status & WRX_ST_EOP) == 0)
3688 				sc->sc_rxdiscard = 1;
3689 			if (sc->sc_rxhead != NULL)
3690 				m_freem(sc->sc_rxhead);
3691 			WM_RXCHAIN_RESET(sc);
3692 			DPRINTF(WM_DEBUG_RX,
3693 			    ("%s: RX: Rx buffer allocation failed, "
3694 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3695 			    sc->sc_rxdiscard ? " (discard)" : ""));
3696 			continue;
3697 		}
3698 
3699 		m->m_len = len;
3700 		sc->sc_rxlen += len;
3701 		DPRINTF(WM_DEBUG_RX,
3702 		    ("%s: RX: buffer at %p len %d\n",
3703 		    device_xname(sc->sc_dev), m->m_data, len));
3704 
3705 		/*
3706 		 * If this is not the end of the packet, keep
3707 		 * looking.
3708 		 */
3709 		if ((status & WRX_ST_EOP) == 0) {
3710 			WM_RXCHAIN_LINK(sc, m);
3711 			DPRINTF(WM_DEBUG_RX,
3712 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3713 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3714 			continue;
3715 		}
3716 
3717 		/*
3718 		 * Okay, we have the entire packet now.  Except on the
3719 		 * I350, I354 and I21[01], the chip is configured to
3720 		 * include the FCS (not all chips can be configured to
3721 		 * strip it), so we need to trim it.  We may also need to
3722 		 * adjust the length of the previous mbuf in the chain
3723 		 * if the current mbuf is too short.
3724 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
3725 		 * register is always set on the I350, so we don't trim there.
3726 		 */
3727 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3728 		    && (sc->sc_type != WM_T_I210)
3729 		    && (sc->sc_type != WM_T_I211)) {
3730 			if (m->m_len < ETHER_CRC_LEN) {
3731 				sc->sc_rxtail->m_len
3732 				    -= (ETHER_CRC_LEN - m->m_len);
3733 				m->m_len = 0;
3734 			} else
3735 				m->m_len -= ETHER_CRC_LEN;
3736 			len = sc->sc_rxlen - ETHER_CRC_LEN;
3737 		} else
3738 			len = sc->sc_rxlen;
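
		/*
		 * Worked example of the trim above: with a 4-byte FCS
		 * (ETHER_CRC_LEN) and a final mbuf holding only 2 bytes,
		 * both of those bytes are FCS and the other 2 FCS bytes
		 * sit at the end of the previous mbuf, so sc_rxtail
		 * shrinks by (4 - 2) and the final mbuf's length drops
		 * to 0.
		 */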
3739 
3740 		WM_RXCHAIN_LINK(sc, m);
3741 
3742 		*sc->sc_rxtailp = NULL;
3743 		m = sc->sc_rxhead;
3744 
3745 		WM_RXCHAIN_RESET(sc);
3746 
3747 		DPRINTF(WM_DEBUG_RX,
3748 		    ("%s: RX: have entire packet, len -> %d\n",
3749 		    device_xname(sc->sc_dev), len));
3750 
3751 		/*
3752 		 * If an error occurred, update stats and drop the packet.
3753 		 */
3754 		if (errors &
3755 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3756 			if (errors & WRX_ER_SE)
3757 				log(LOG_WARNING, "%s: symbol error\n",
3758 				    device_xname(sc->sc_dev));
3759 			else if (errors & WRX_ER_SEQ)
3760 				log(LOG_WARNING, "%s: receive sequence error\n",
3761 				    device_xname(sc->sc_dev));
3762 			else if (errors & WRX_ER_CE)
3763 				log(LOG_WARNING, "%s: CRC error\n",
3764 				    device_xname(sc->sc_dev));
3765 			m_freem(m);
3766 			continue;
3767 		}
3768 
3769 		/*
3770 		 * No errors.  Receive the packet.
3771 		 */
3772 		m->m_pkthdr.rcvif = ifp;
3773 		m->m_pkthdr.len = len;
3774 
3775 		/*
3776 		 * If VLANs are enabled, VLAN packets have been unwrapped
3777 		 * for us.  Associate the tag with the packet.
3778 		 */
3779 		/* XXXX should check for i350 and i354 */
3780 		if ((status & WRX_ST_VP) != 0) {
3781 			VLAN_INPUT_TAG(ifp, m,
3782 			    le16toh(vlantag),
3783 			    continue);
3784 		}
3785 
3786 		/*
3787 		 * Set up checksum info for this packet.
3788 		 */
3789 		if ((status & WRX_ST_IXSM) == 0) {
3790 			if (status & WRX_ST_IPCS) {
3791 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3792 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3793 				if (errors & WRX_ER_IPE)
3794 					m->m_pkthdr.csum_flags |=
3795 					    M_CSUM_IPv4_BAD;
3796 			}
3797 			if (status & WRX_ST_TCPCS) {
3798 				/*
3799 				 * Note: we don't know if this was TCP or UDP,
3800 				 * so we just set both bits, and expect the
3801 				 * upper layers to deal.
3802 				 */
3803 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3804 				m->m_pkthdr.csum_flags |=
3805 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3806 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3807 				if (errors & WRX_ER_TCPE)
3808 					m->m_pkthdr.csum_flags |=
3809 					    M_CSUM_TCP_UDP_BAD;
3810 			}
3811 		}
3812 
3813 		ifp->if_ipackets++;
3814 
3815 		/* Pass this up to any BPF listeners. */
3816 		bpf_mtap(ifp, m);
3817 
3818 		/* Pass it on. */
3819 		(*ifp->if_input)(ifp, m);
3820 	}
3821 
3822 	/* Update the receive pointer. */
3823 	sc->sc_rxptr = i;
3824 
3825 	DPRINTF(WM_DEBUG_RX,
3826 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3827 }
3828 
3829 /*
3830  * wm_linkintr_gmii:
3831  *
3832  *	Helper; handle link interrupts for GMII.
3833  */
3834 static void
3835 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3836 {
3837 
3838 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3839 		__func__));
3840 
3841 	if (icr & ICR_LSC) {
3842 		DPRINTF(WM_DEBUG_LINK,
3843 		    ("%s: LINK: LSC -> mii_pollstat\n",
3844 			device_xname(sc->sc_dev)));
3845 		mii_pollstat(&sc->sc_mii);
3846 		if (sc->sc_type == WM_T_82543) {
3847 			int miistatus, active;
3848 
3849 			/*
3850 			 * With the 82543, we need to force the MAC's
3851 			 * speed and duplex to match the PHY's speed
3852 			 * and duplex configuration.
3853 			 */
3854 			miistatus = sc->sc_mii.mii_media_status;
3855 
3856 			if (miistatus & IFM_ACTIVE) {
3857 				active = sc->sc_mii.mii_media_active;
3858 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3859 				switch (IFM_SUBTYPE(active)) {
3860 				case IFM_10_T:
3861 					sc->sc_ctrl |= CTRL_SPEED_10;
3862 					break;
3863 				case IFM_100_TX:
3864 					sc->sc_ctrl |= CTRL_SPEED_100;
3865 					break;
3866 				case IFM_1000_T:
3867 					sc->sc_ctrl |= CTRL_SPEED_1000;
3868 					break;
3869 				default:
3870 					/*
3871 					 * fiber?
3872 					 * Should not enter here.
3873 					 */
3874 					printf("unknown media (%x)\n",
3875 					    active);
3876 					break;
3877 				}
3878 				if (active & IFM_FDX)
3879 					sc->sc_ctrl |= CTRL_FD;
3880 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3881 			}
3882 		} else if ((sc->sc_type == WM_T_ICH8)
3883 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3884 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3885 		} else if (sc->sc_type == WM_T_PCH) {
3886 			wm_k1_gig_workaround_hv(sc,
3887 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3888 		}
3889 
3890 		if ((sc->sc_phytype == WMPHY_82578)
3891 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3892 			== IFM_1000_T)) {
3893 
3894 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3895 				delay(200*1000); /* XXX too big */
3896 
3897 				/* Link stall fix for link up */
3898 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3899 				    HV_MUX_DATA_CTRL,
3900 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3901 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3902 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3903 				    HV_MUX_DATA_CTRL,
3904 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3905 			}
3906 		}
3907 	} else if (icr & ICR_RXSEQ) {
3908 		DPRINTF(WM_DEBUG_LINK,
3909 		    ("%s: LINK Receive sequence error\n",
3910 			device_xname(sc->sc_dev)));
3911 	}
3912 }
3913 
3914 /*
3915  * wm_linkintr_tbi:
3916  *
3917  *	Helper; handle link interrupts for TBI mode.
3918  */
3919 static void
3920 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3921 {
3922 	uint32_t status;
3923 
3924 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3925 		__func__));
3926 
3927 	status = CSR_READ(sc, WMREG_STATUS);
3928 	if (icr & ICR_LSC) {
3929 		if (status & STATUS_LU) {
3930 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3931 			    device_xname(sc->sc_dev),
3932 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3933 			/*
3934 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
3935 			 * automatically, so we re-read it into sc->sc_ctrl.
3936 			 */
3937 
3938 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3939 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3940 			sc->sc_fcrtl &= ~FCRTL_XONE;
3941 			if (status & STATUS_FD)
3942 				sc->sc_tctl |=
3943 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3944 			else
3945 				sc->sc_tctl |=
3946 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3947 			if (sc->sc_ctrl & CTRL_TFCE)
3948 				sc->sc_fcrtl |= FCRTL_XONE;
3949 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3950 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3951 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3952 				      sc->sc_fcrtl);
3953 			sc->sc_tbi_linkup = 1;
3954 		} else {
3955 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3956 			    device_xname(sc->sc_dev)));
3957 			sc->sc_tbi_linkup = 0;
3958 		}
3959 		wm_tbi_set_linkled(sc);
3960 	} else if (icr & ICR_RXCFG) {
3961 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3962 		    device_xname(sc->sc_dev)));
3963 		sc->sc_tbi_nrxcfg++;
3964 		wm_check_for_link(sc);
3965 	} else if (icr & ICR_RXSEQ) {
3966 		DPRINTF(WM_DEBUG_LINK,
3967 		    ("%s: LINK: Receive sequence error\n",
3968 		    device_xname(sc->sc_dev)));
3969 	}
3970 }
3971 
3972 /*
3973  * wm_linkintr:
3974  *
3975  *	Helper; handle link interrupts.
3976  */
3977 static void
3978 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3979 {
3980 
3981 	if (sc->sc_flags & WM_F_HAS_MII)
3982 		wm_linkintr_gmii(sc, icr);
3983 	else
3984 		wm_linkintr_tbi(sc, icr);
3985 }
3986 
3987 /*
3988  * wm_tick:
3989  *
3990  *	One second timer, used to check link status, sweep up
3991  *	completed transmit jobs, etc.
3992  */
3993 static void
3994 wm_tick(void *arg)
3995 {
3996 	struct wm_softc *sc = arg;
3997 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3998 	int s;
3999 
4000 	s = splnet();
4001 
4002 	if (sc->sc_type >= WM_T_82542_2_1) {
4003 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4004 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4005 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4006 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4007 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4008 	}
4009 
4010 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4011 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
4012 	    + CSR_READ(sc, WMREG_CRCERRS)
4013 	    + CSR_READ(sc, WMREG_ALGNERRC)
4014 	    + CSR_READ(sc, WMREG_SYMERRC)
4015 	    + CSR_READ(sc, WMREG_RXERRC)
4016 	    + CSR_READ(sc, WMREG_SEC)
4017 	    + CSR_READ(sc, WMREG_CEXTERR)
4018 	    + CSR_READ(sc, WMREG_RLEC);
4019 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4020 
4021 	if (sc->sc_flags & WM_F_HAS_MII)
4022 		mii_tick(&sc->sc_mii);
4023 	else
4024 		wm_tbi_check_link(sc);
4025 
4026 	splx(s);
4027 
4028 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4029 }
4030 
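/*
 * wm_tick() rearms itself with callout_reset(), the usual NetBSD shape
 * for a periodic per-device timer.  Schematically over the driver's
 * life cycle (example only):
 */
#if 0
	callout_init(&sc->sc_tick_ch, 0);		 /* at attach */
	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc); /* start/rearm */
	callout_stop(&sc->sc_tick_ch);			 /* in wm_stop() */
#endif
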
4031 /*
4032  * wm_reset:
4033  *
4034  *	Reset the i82542 chip.
4035  */
4036 static void
4037 wm_reset(struct wm_softc *sc)
4038 {
4039 	int phy_reset = 0;
4040 	uint32_t reg, mask;
4041 
4042 	/*
4043 	 * Allocate on-chip memory according to the MTU size.
4044 	 * The Packet Buffer Allocation register must be written
4045 	 * before the chip is reset.
4046 	 */
4047 	switch (sc->sc_type) {
4048 	case WM_T_82547:
4049 	case WM_T_82547_2:
4050 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4051 		    PBA_22K : PBA_30K;
4052 		sc->sc_txfifo_head = 0;
4053 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4054 		sc->sc_txfifo_size =
4055 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4056 		sc->sc_txfifo_stall = 0;
4057 		break;
4058 	case WM_T_82571:
4059 	case WM_T_82572:
4060 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
4061 	case WM_T_I350:
4062 	case WM_T_I354:
4063 	case WM_T_80003:
4064 		sc->sc_pba = PBA_32K;
4065 		break;
4066 	case WM_T_82580:
4067 	case WM_T_82580ER:
4068 		sc->sc_pba = PBA_35K;
4069 		break;
4070 	case WM_T_I210:
4071 	case WM_T_I211:
4072 		sc->sc_pba = PBA_34K;
4073 		break;
4074 	case WM_T_82576:
4075 		sc->sc_pba = PBA_64K;
4076 		break;
4077 	case WM_T_82573:
4078 		sc->sc_pba = PBA_12K;
4079 		break;
4080 	case WM_T_82574:
4081 	case WM_T_82583:
4082 		sc->sc_pba = PBA_20K;
4083 		break;
4084 	case WM_T_ICH8:
4085 		sc->sc_pba = PBA_8K;
4086 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4087 		break;
4088 	case WM_T_ICH9:
4089 	case WM_T_ICH10:
4090 		sc->sc_pba = PBA_10K;
4091 		break;
4092 	case WM_T_PCH:
4093 	case WM_T_PCH2:
4094 	case WM_T_PCH_LPT:
4095 		sc->sc_pba = PBA_26K;
4096 		break;
4097 	default:
4098 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4099 		    PBA_40K : PBA_48K;
4100 		break;
4101 	}
4102 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4103 
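	/*
	 * Worked example of the split above for the 82547: with a
	 * standard MTU, sc_pba = PBA_30K, so the Tx FIFO is assigned the
	 * remaining (40K - 30K) = 10KB of the 40KB packet buffer; with a
	 * jumbo MTU (> 8192) the split becomes 22KB Rx / 18KB Tx.
	 */
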
4104 	/* Prevent the PCI-E bus from sticking */
4105 	if (sc->sc_flags & WM_F_PCIE) {
4106 		int timeout = 800;
4107 
4108 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4109 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4110 
4111 		while (timeout--) {
4112 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4113 			    == 0)
4114 				break;
4115 			delay(100);
4116 		}
4117 	}
4118 
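	/*
	 * (The loop above waits up to 800 * 100us = 80ms for outstanding
	 * GIO master transactions to drain before the reset proceeds.)
	 */
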
4119 	/* Set the completion timeout for interface */
4120 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4121 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4122 		wm_set_pcie_completion_timeout(sc);
4123 
4124 	/* Clear interrupt */
4125 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4126 
4127 	/* Stop the transmit and receive processes. */
4128 	CSR_WRITE(sc, WMREG_RCTL, 0);
4129 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4130 	sc->sc_rctl &= ~RCTL_EN;
4131 
4132 	/* XXX set_tbi_sbp_82543() */
4133 
4134 	delay(10*1000);
4135 
4136 	/* Must acquire the MDIO ownership before MAC reset */
4137 	switch (sc->sc_type) {
4138 	case WM_T_82573:
4139 	case WM_T_82574:
4140 	case WM_T_82583:
4141 		wm_get_hw_semaphore_82573(sc);
4142 		break;
4143 	default:
4144 		break;
4145 	}
4146 
4147 	/*
4148 	 * 82541 Errata 29? & 82547 Errata 28?
4149 	 * See also the description about PHY_RST bit in CTRL register
4150 	 * in 8254x_GBe_SDM.pdf.
4151 	 */
4152 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4153 		CSR_WRITE(sc, WMREG_CTRL,
4154 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4155 		delay(5000);
4156 	}
4157 
4158 	switch (sc->sc_type) {
4159 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4160 	case WM_T_82541:
4161 	case WM_T_82541_2:
4162 	case WM_T_82547:
4163 	case WM_T_82547_2:
4164 		/*
4165 		 * On some chipsets, a reset through a memory-mapped write
4166 		 * cycle can cause the chip to reset before completing the
4167 		 * write cycle.  This causes major headaches that can be
4168 		 * avoided by issuing the reset via indirect register writes
4169 		 * through I/O space.
4170 		 *
4171 		 * So, if we successfully mapped the I/O BAR at attach time,
4172 		 * use that.  Otherwise, try our luck with a memory-mapped
4173 		 * reset.
4174 		 */
4175 		if (sc->sc_flags & WM_F_IOH_VALID)
4176 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4177 		else
4178 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4179 		break;
4180 	case WM_T_82545_3:
4181 	case WM_T_82546_3:
4182 		/* Use the shadow control register on these chips. */
4183 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4184 		break;
4185 	case WM_T_80003:
4186 		mask = swfwphysem[sc->sc_funcid];
4187 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4188 		wm_get_swfw_semaphore(sc, mask);
4189 		CSR_WRITE(sc, WMREG_CTRL, reg);
4190 		wm_put_swfw_semaphore(sc, mask);
4191 		break;
4192 	case WM_T_ICH8:
4193 	case WM_T_ICH9:
4194 	case WM_T_ICH10:
4195 	case WM_T_PCH:
4196 	case WM_T_PCH2:
4197 	case WM_T_PCH_LPT:
4198 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4199 		if (wm_check_reset_block(sc) == 0) {
4200 			/*
4201 			 * Gate automatic PHY configuration by hardware on
4202 			 * non-managed 82579
4203 			 */
4204 			if ((sc->sc_type == WM_T_PCH2)
4205 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4206 				!= 0))
4207 				wm_gate_hw_phy_config_ich8lan(sc, 1);
4208 
4209 
4210 			reg |= CTRL_PHY_RESET;
4211 			phy_reset = 1;
4212 		}
4213 		wm_get_swfwhw_semaphore(sc);
4214 		CSR_WRITE(sc, WMREG_CTRL, reg);
4215 		delay(20*1000);
4216 		wm_put_swfwhw_semaphore(sc);
4217 		break;
4218 	case WM_T_82542_2_0:
4219 	case WM_T_82542_2_1:
4220 	case WM_T_82543:
4221 	case WM_T_82540:
4222 	case WM_T_82545:
4223 	case WM_T_82546:
4224 	case WM_T_82571:
4225 	case WM_T_82572:
4226 	case WM_T_82573:
4227 	case WM_T_82574:
4228 	case WM_T_82575:
4229 	case WM_T_82576:
4230 	case WM_T_82580:
4231 	case WM_T_82580ER:
4232 	case WM_T_82583:
4233 	case WM_T_I350:
4234 	case WM_T_I354:
4235 	case WM_T_I210:
4236 	case WM_T_I211:
4237 	default:
4238 		/* Everything else can safely use the documented method. */
4239 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4240 		break;
4241 	}
4242 
4243 	/* Must release the MDIO ownership after MAC reset */
4244 	switch (sc->sc_type) {
4245 	case WM_T_82574:
4246 	case WM_T_82583:
4247 		wm_put_hw_semaphore_82573(sc);
4248 		break;
4249 	default:
4250 		break;
4251 	}
4252 
4253 	if (phy_reset != 0)
4254 		wm_get_cfg_done(sc);
4255 
4256 	/* reload EEPROM */
4257 	switch (sc->sc_type) {
4258 	case WM_T_82542_2_0:
4259 	case WM_T_82542_2_1:
4260 	case WM_T_82543:
4261 	case WM_T_82544:
4262 		delay(10);
4263 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4264 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4265 		delay(2000);
4266 		break;
4267 	case WM_T_82540:
4268 	case WM_T_82545:
4269 	case WM_T_82545_3:
4270 	case WM_T_82546:
4271 	case WM_T_82546_3:
4272 		delay(5*1000);
4273 		/* XXX Disable HW ARPs on ASF enabled adapters */
4274 		break;
4275 	case WM_T_82541:
4276 	case WM_T_82541_2:
4277 	case WM_T_82547:
4278 	case WM_T_82547_2:
4279 		delay(20000);
4280 		/* XXX Disable HW ARPs on ASF enabled adapters */
4281 		break;
4282 	case WM_T_82571:
4283 	case WM_T_82572:
4284 	case WM_T_82573:
4285 	case WM_T_82574:
4286 	case WM_T_82583:
4287 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4288 			delay(10);
4289 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4290 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4291 		}
4292 		/* check EECD_EE_AUTORD */
4293 		wm_get_auto_rd_done(sc);
4294 		/*
4295 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4296 		 * is set.
4297 		 */
4298 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4299 		    || (sc->sc_type == WM_T_82583))
4300 			delay(25*1000);
4301 		break;
4302 	case WM_T_82575:
4303 	case WM_T_82576:
4304 	case WM_T_82580:
4305 	case WM_T_82580ER:
4306 	case WM_T_I350:
4307 	case WM_T_I354:
4308 	case WM_T_I210:
4309 	case WM_T_I211:
4310 	case WM_T_80003:
4311 		/* check EECD_EE_AUTORD */
4312 		wm_get_auto_rd_done(sc);
4313 		break;
4314 	case WM_T_ICH8:
4315 	case WM_T_ICH9:
4316 	case WM_T_ICH10:
4317 	case WM_T_PCH:
4318 	case WM_T_PCH2:
4319 	case WM_T_PCH_LPT:
4320 		break;
4321 	default:
4322 		panic("%s: unknown type\n", __func__);
4323 	}
4324 
4325 	/* Check whether EEPROM is present or not */
4326 	switch (sc->sc_type) {
4327 	case WM_T_82575:
4328 	case WM_T_82576:
4329 #if 0 /* XXX */
4330 	case WM_T_82580:
4331 	case WM_T_82580ER:
4332 #endif
4333 	case WM_T_I350:
4334 	case WM_T_I354:
4335 	case WM_T_ICH8:
4336 	case WM_T_ICH9:
4337 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4338 			/* Not found */
4339 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4340 			if ((sc->sc_type == WM_T_82575)
4341 			    || (sc->sc_type == WM_T_82576)
4342 			    || (sc->sc_type == WM_T_82580)
4343 			    || (sc->sc_type == WM_T_82580ER)
4344 			    || (sc->sc_type == WM_T_I350)
4345 			    || (sc->sc_type == WM_T_I354))
4346 				wm_reset_init_script_82575(sc);
4347 		}
4348 		break;
4349 	default:
4350 		break;
4351 	}
4352 
4353 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4354 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4355 		/* clear global device reset status bit */
4356 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4357 	}
4358 
4359 	/* Clear any pending interrupt events. */
4360 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4361 	reg = CSR_READ(sc, WMREG_ICR);
4362 
4363 	/* reload sc_ctrl */
4364 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4365 
4366 	if (sc->sc_type == WM_T_I350)
4367 		wm_set_eee_i350(sc);
4368 
4369 	/* dummy read from WUC */
4370 	if (sc->sc_type == WM_T_PCH)
4371 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4372 	/*
4373 	 * For PCH, this write will make sure that any noise will be detected
4374 	 * as a CRC error and be dropped rather than show up as a bad packet
4375 	 * to the DMA engine
4376 	 */
4377 	if (sc->sc_type == WM_T_PCH)
4378 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4379 
4380 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4381 		CSR_WRITE(sc, WMREG_WUC, 0);
4382 
4383 	/* XXX need special handling for 82580 */
4384 }
4385 
4386 static void
4387 wm_set_vlan(struct wm_softc *sc)
4388 {
4389 	/* Deal with VLAN enables. */
4390 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4391 		sc->sc_ctrl |= CTRL_VME;
4392 	else
4393 		sc->sc_ctrl &= ~CTRL_VME;
4394 
4395 	/* Write the control registers. */
4396 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4397 }
4398 
4399 /*
4400  * wm_init:		[ifnet interface function]
4401  *
4402  *	Initialize the interface.  Must be called at splnet().
4403  */
4404 static int
4405 wm_init(struct ifnet *ifp)
4406 {
4407 	struct wm_softc *sc = ifp->if_softc;
4408 	struct wm_rxsoft *rxs;
4409 	int i, j, trynum, error = 0;
4410 	uint32_t reg;
4411 
4412 	/*
4413 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4414 	 * There is a small but measurable benefit to avoiding the adjustment
4415 	 * of the descriptor so that the headers are aligned, for normal mtu,
4416 	 * on such platforms.  One possibility is that the DMA itself is
4417 	 * slightly more efficient if the front of the entire packet (instead
4418 	 * of the front of the headers) is aligned.
4419 	 *
4420 	 * Note we must always set align_tweak to 0 if we are using
4421 	 * jumbo frames.
4422 	 */
4423 #ifdef __NO_STRICT_ALIGNMENT
4424 	sc->sc_align_tweak = 0;
4425 #else
4426 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4427 		sc->sc_align_tweak = 0;
4428 	else
4429 		sc->sc_align_tweak = 2;
4430 #endif /* __NO_STRICT_ALIGNMENT */
4431 
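	/*
	 * The 2-byte tweak above is the classic ETHER_ALIGN trick: a
	 * 14-byte Ethernet header plus a 2-byte pad leaves the IP header
	 * 4-byte aligned in the receive buffer, which strict-alignment
	 * platforms require and others merely pay a small price for.
	 */
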
4432 	/* Cancel any pending I/O. */
4433 	wm_stop(ifp, 0);
4434 
4435 	/* update statistics before reset */
4436 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4437 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4438 
4439 	/* Reset the chip to a known state. */
4440 	wm_reset(sc);
4441 
4442 	switch (sc->sc_type) {
4443 	case WM_T_82571:
4444 	case WM_T_82572:
4445 	case WM_T_82573:
4446 	case WM_T_82574:
4447 	case WM_T_82583:
4448 	case WM_T_80003:
4449 	case WM_T_ICH8:
4450 	case WM_T_ICH9:
4451 	case WM_T_ICH10:
4452 	case WM_T_PCH:
4453 	case WM_T_PCH2:
4454 	case WM_T_PCH_LPT:
4455 		if (wm_check_mng_mode(sc) != 0)
4456 			wm_get_hw_control(sc);
4457 		break;
4458 	default:
4459 		break;
4460 	}
4461 
4462 	/* Reset the PHY. */
4463 	if (sc->sc_flags & WM_F_HAS_MII)
4464 		wm_gmii_reset(sc);
4465 
4466 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
4467 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4468 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4469 	    || (sc->sc_type == WM_T_PCH_LPT))
4470 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4471 
4472 	/* Initialize the transmit descriptor ring. */
4473 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4474 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4475 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4476 	sc->sc_txfree = WM_NTXDESC(sc);
4477 	sc->sc_txnext = 0;
4478 
4479 	if (sc->sc_type < WM_T_82543) {
4480 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4481 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4482 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4483 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4484 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4485 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4486 	} else {
4487 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4488 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4489 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4490 		CSR_WRITE(sc, WMREG_TDH, 0);
4491 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
4492 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
4493 
4494 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4495 			/*
4496 			 * Don't write TDT before TCTL.EN is set.
4497 			 * See the document.
4498 			 * See the documentation.
4499 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4500 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4501 			    | TXDCTL_WTHRESH(0));
4502 		else {
4503 			CSR_WRITE(sc, WMREG_TDT, 0);
4504 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4505 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4506 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4507 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4508 		}
4509 	}
4510 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4511 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4512 
4513 	/* Initialize the transmit job descriptors. */
4514 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4515 		sc->sc_txsoft[i].txs_mbuf = NULL;
4516 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
4517 	sc->sc_txsnext = 0;
4518 	sc->sc_txsdirty = 0;
4519 
4520 	/*
4521 	 * Initialize the receive descriptor and receive job
4522 	 * descriptor rings.
4523 	 */
4524 	if (sc->sc_type < WM_T_82543) {
4525 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4526 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4527 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4528 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4529 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4530 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4531 
4532 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4533 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4534 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4535 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4536 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4537 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4538 	} else {
4539 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4540 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4541 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4542 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4543 			CSR_WRITE(sc, WMREG_EITR(0), 450);
4544 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4545 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
4546 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4547 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4548 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4549 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4550 			    | RXDCTL_WTHRESH(1));
4551 		} else {
4552 			CSR_WRITE(sc, WMREG_RDH, 0);
4553 			CSR_WRITE(sc, WMREG_RDT, 0);
4554 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4555 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
4556 		}
4557 	}
4558 	for (i = 0; i < WM_NRXDESC; i++) {
4559 		rxs = &sc->sc_rxsoft[i];
4560 		if (rxs->rxs_mbuf == NULL) {
4561 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4562 				log(LOG_ERR, "%s: unable to allocate or map "
4563 				    "rx buffer %d, error = %d\n",
4564 				    device_xname(sc->sc_dev), i, error);
4565 				/*
4566 				 * XXX Should attempt to run with fewer receive
4567 				 * XXX buffers instead of just failing.
4568 				 */
4569 				wm_rxdrain(sc);
4570 				goto out;
4571 			}
4572 		} else {
4573 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4574 				WM_INIT_RXDESC(sc, i);
4575 			/*
4576 			 * For 82575 and newer devices, the RX descriptors
4577 			 * must be initialized after the setting of RCTL.EN in
4578 			 * wm_set_filter()
4579 			 */
4580 		}
4581 	}
4582 	sc->sc_rxptr = 0;
4583 	sc->sc_rxdiscard = 0;
4584 	WM_RXCHAIN_RESET(sc);
4585 
4586 	/*
4587 	 * Clear out the VLAN table -- we don't use it (yet).
4588 	 */
4589 	CSR_WRITE(sc, WMREG_VET, 0);
4590 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4591 		trynum = 10; /* Due to hw errata */
4592 	else
4593 		trynum = 1;
4594 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4595 		for (j = 0; j < trynum; j++)
4596 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4597 
4598 	/*
4599 	 * Set up flow-control parameters.
4600 	 *
4601 	 * XXX Values could probably stand some tuning.
4602 	 */
4603 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4604 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4605 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4606 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4607 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4608 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4609 	}
4610 
4611 	sc->sc_fcrtl = FCRTL_DFLT;
4612 	if (sc->sc_type < WM_T_82543) {
4613 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4614 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4615 	} else {
4616 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4617 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4618 	}
4619 
4620 	if (sc->sc_type == WM_T_80003)
4621 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4622 	else
4623 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4624 
4625 	/* Writes the control register. */
4626 	wm_set_vlan(sc);
4627 
4628 	if (sc->sc_flags & WM_F_HAS_MII) {
4629 		int val;
4630 
4631 		switch (sc->sc_type) {
4632 		case WM_T_80003:
4633 		case WM_T_ICH8:
4634 		case WM_T_ICH9:
4635 		case WM_T_ICH10:
4636 		case WM_T_PCH:
4637 		case WM_T_PCH2:
4638 		case WM_T_PCH_LPT:
4639 			/*
4640 			 * Set the mac to wait the maximum time between each
4641 			 * iteration and increase the max iterations when
4642 			 * polling the phy; this fixes erroneous timeouts at
4643 			 * 10Mbps.
4644 			 */
4645 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4646 			    0xFFFF);
4647 			val = wm_kmrn_readreg(sc,
4648 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4649 			val |= 0x3F;
4650 			wm_kmrn_writereg(sc,
4651 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4652 			break;
4653 		default:
4654 			break;
4655 		}
4656 
4657 		if (sc->sc_type == WM_T_80003) {
4658 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4659 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4660 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4661 
4662 			/* Bypass RX and TX FIFO's */
4663 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4664 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4665 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4666 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4667 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4668 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4669 		}
4670 	}
4671 #if 0
4672 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4673 #endif
4674 
4675 	/*
4676 	 * Set up checksum offload parameters.
4677 	 */
4678 	reg = CSR_READ(sc, WMREG_RXCSUM);
4679 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4680 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4681 		reg |= RXCSUM_IPOFL;
4682 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4683 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4684 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4685 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4686 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4687 
4688 	/* Reset TBI's RXCFG count */
4689 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4690 
4691 	/*
4692 	 * Set up the interrupt registers.
4693 	 */
4694 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4695 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4696 	    ICR_RXO | ICR_RXT0;
4697 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4698 		sc->sc_icr |= ICR_RXCFG;
4699 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4700 
4701 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4702 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4703 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4704 		reg = CSR_READ(sc, WMREG_KABGTXD);
4705 		reg |= KABGTXD_BGSQLBIAS;
4706 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4707 	}
4708 
4709 	/* Set up the inter-packet gap. */
4710 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4711 
4712 	if (sc->sc_type >= WM_T_82543) {
4713 		/*
4714 		 * Set up the interrupt throttling register (units of 256ns)
4715 		 * Note that a footnote in Intel's documentation says this
4716 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4717 		 * or 10Mbit mode.  Empirically, the same appears to be true
4718 		 * for the 1024ns units of the other
4719 		 * interrupt-related timer registers -- so, really, we ought
4720 		 * to divide this value by 4 when the link speed is low.
4721 		 *
4722 		 * XXX implement this division at link speed change!
4723 		 */
4724 
4725 		 /*
4726 		  * For N interrupts/sec, set this value to:
4727 		  * 1000000000 / (N * 256).  Note that we set the
4728 		  * absolute and packet timer values to this value
4729 		  * divided by 4 to get "simple timer" behavior.
4730 		  */
4731 
4732 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4733 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4734 	}
4735 
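	/*
	 * Worked example for the formula above: sc_itr = 1500 gives
	 * 1000000000 / (1500 * 256) ~= 2604 interrupts/sec, matching the
	 * comment at the assignment; a target of 8000 ints/sec would be
	 * programmed as 1000000000 / (8000 * 256) ~= 488.
	 */
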
4736 	/* Set the VLAN ethernetype. */
4737 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4738 
4739 	/*
4740 	 * Set up the transmit control register; we start out with
4741 	 * a collision distance suitable for FDX, but update it when
4742 	 * we resolve the media type.
4743 	 */
4744 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4745 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4746 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4747 	if (sc->sc_type >= WM_T_82571)
4748 		sc->sc_tctl |= TCTL_MULR;
4749 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4750 
4751 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4752 		/*
4753 		 * Write TDT after TCTL.EN is set.
4754 		 * See the documentation.
4755 		 */
4756 		CSR_WRITE(sc, WMREG_TDT, 0);
4757 	}
4758 
4759 	if (sc->sc_type == WM_T_80003) {
4760 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4761 		reg &= ~TCTL_EXT_GCEX_MASK;
4762 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4763 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4764 	}
4765 
4766 	/* Set the media. */
4767 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4768 		goto out;
4769 
4770 	/* Configure for OS presence */
4771 	wm_init_manageability(sc);
4772 
4773 	/*
4774 	 * Set up the receive control register; we actually program
4775 	 * the register when we set the receive filter.  Use multicast
4776 	 * address offset type 0.
4777 	 *
4778 	 * Only the i82544 has the ability to strip the incoming
4779 	 * CRC, so we don't enable that feature.
4780 	 */
4781 	sc->sc_mchash_type = 0;
4782 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4783 	    | RCTL_MO(sc->sc_mchash_type);
4784 
4785 	/*
4786 	 * The I350 has a bug where it always strips the CRC whether
4787 	 * asked to or not.  So ask for a stripped CRC and cope in wm_rxintr().
4788 	 */
4789 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4790 	    || (sc->sc_type == WM_T_I210))
4791 		sc->sc_rctl |= RCTL_SECRC;
4792 
4793 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4794 	    && (ifp->if_mtu > ETHERMTU)) {
4795 		sc->sc_rctl |= RCTL_LPE;
4796 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4797 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4798 	}
4799 
4800 	if (MCLBYTES == 2048) {
4801 		sc->sc_rctl |= RCTL_2k;
4802 	} else {
4803 		if (sc->sc_type >= WM_T_82543) {
4804 			switch (MCLBYTES) {
4805 			case 4096:
4806 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4807 				break;
4808 			case 8192:
4809 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4810 				break;
4811 			case 16384:
4812 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4813 				break;
4814 			default:
4815 				panic("wm_init: MCLBYTES %d unsupported",
4816 				    MCLBYTES);
4817 				break;
4818 			}
4819 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4820 	}
4821 
4822 	/* Set the receive filter. */
4823 	wm_set_filter(sc);
4824 
4825 	/* Enable ECC */
4826 	switch (sc->sc_type) {
4827 	case WM_T_82571:
4828 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4829 		reg |= PBA_ECC_CORR_EN;
4830 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4831 		break;
4832 	case WM_T_PCH_LPT:
4833 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4834 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4835 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4836 
4837 		reg = CSR_READ(sc, WMREG_CTRL);
4838 		reg |= CTRL_MEHE;
4839 		CSR_WRITE(sc, WMREG_CTRL, reg);
4840 		break;
4841 	default:
4842 		break;
4843 	}
4844 
4845 	/* On 575 and later set RDT only if RX enabled */
4846 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4847 		for (i = 0; i < WM_NRXDESC; i++)
4848 			WM_INIT_RXDESC(sc, i);
4849 
4850 	/* Start the one second link check clock. */
4851 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4852 
4853 	/* ...all done! */
4854 	ifp->if_flags |= IFF_RUNNING;
4855 	ifp->if_flags &= ~IFF_OACTIVE;
4856 
4857  out:
4858 	sc->sc_if_flags = ifp->if_flags;
4859 	if (error)
4860 		log(LOG_ERR, "%s: interface not running\n",
4861 		    device_xname(sc->sc_dev));
4862 	return error;
4863 }
4864 
4865 /*
4866  * wm_rxdrain:
4867  *
4868  *	Drain the receive queue.
4869  */
4870 static void
4871 wm_rxdrain(struct wm_softc *sc)
4872 {
4873 	struct wm_rxsoft *rxs;
4874 	int i;
4875 
4876 	for (i = 0; i < WM_NRXDESC; i++) {
4877 		rxs = &sc->sc_rxsoft[i];
4878 		if (rxs->rxs_mbuf != NULL) {
4879 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4880 			m_freem(rxs->rxs_mbuf);
4881 			rxs->rxs_mbuf = NULL;
4882 		}
4883 	}
4884 }
4885 
4886 /*
4887  * wm_stop:		[ifnet interface function]
4888  *
4889  *	Stop transmission on the interface.
4890  */
4891 static void
4892 wm_stop(struct ifnet *ifp, int disable)
4893 {
4894 	struct wm_softc *sc = ifp->if_softc;
4895 	struct wm_txsoft *txs;
4896 	int i;
4897 
4898 	/* Stop the one second clock. */
4899 	callout_stop(&sc->sc_tick_ch);
4900 
4901 	/* Stop the 82547 Tx FIFO stall check timer. */
4902 	if (sc->sc_type == WM_T_82547)
4903 		callout_stop(&sc->sc_txfifo_ch);
4904 
4905 	if (sc->sc_flags & WM_F_HAS_MII) {
4906 		/* Down the MII. */
4907 		mii_down(&sc->sc_mii);
4908 	} else {
4909 #if 0
4910 		/* Should we clear PHY's status properly? */
4911 		wm_reset(sc);
4912 #endif
4913 	}
4914 
4915 	/* Stop the transmit and receive processes. */
4916 	CSR_WRITE(sc, WMREG_TCTL, 0);
4917 	CSR_WRITE(sc, WMREG_RCTL, 0);
4918 	sc->sc_rctl &= ~RCTL_EN;
4919 
4920 	/*
4921 	 * Clear the interrupt mask to ensure the device cannot assert its
4922 	 * interrupt line.
4923 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4924 	 * any currently pending or shared interrupt.
4925 	 */
4926 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4927 	sc->sc_icr = 0;
4928 
4929 	/* Release any queued transmit buffers. */
4930 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4931 		txs = &sc->sc_txsoft[i];
4932 		if (txs->txs_mbuf != NULL) {
4933 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4934 			m_freem(txs->txs_mbuf);
4935 			txs->txs_mbuf = NULL;
4936 		}
4937 	}
4938 
4939 	/* Mark the interface as down and cancel the watchdog timer. */
4940 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4941 	ifp->if_timer = 0;
4942 
4943 	if (disable)
4944 		wm_rxdrain(sc);
4945 
4946 #if 0 /* notyet */
4947 	if (sc->sc_type >= WM_T_82544)
4948 		CSR_WRITE(sc, WMREG_WUC, 0);
4949 #endif
4950 }
4951 
4952 void
4953 wm_get_auto_rd_done(struct wm_softc *sc)
4954 {
4955 	int i;
4956 
4957 	/* wait for eeprom to reload */
4958 	switch (sc->sc_type) {
4959 	case WM_T_82571:
4960 	case WM_T_82572:
4961 	case WM_T_82573:
4962 	case WM_T_82574:
4963 	case WM_T_82583:
4964 	case WM_T_82575:
4965 	case WM_T_82576:
4966 	case WM_T_82580:
4967 	case WM_T_82580ER:
4968 	case WM_T_I350:
4969 	case WM_T_I354:
4970 	case WM_T_I210:
4971 	case WM_T_I211:
4972 	case WM_T_80003:
4973 	case WM_T_ICH8:
4974 	case WM_T_ICH9:
4975 		for (i = 0; i < 10; i++) {
4976 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4977 				break;
4978 			delay(1000);
4979 		}
4980 		if (i == 10) {
4981 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4982 			    "complete\n", device_xname(sc->sc_dev));
4983 		}
4984 		break;
4985 	default:
4986 		break;
4987 	}
4988 }
4989 
4990 void
4991 wm_lan_init_done(struct wm_softc *sc)
4992 {
4993 	uint32_t reg = 0;
4994 	int i;
4995 
4996 	/* wait for eeprom to reload */
4997 	switch (sc->sc_type) {
4998 	case WM_T_ICH10:
4999 	case WM_T_PCH:
5000 	case WM_T_PCH2:
5001 	case WM_T_PCH_LPT:
5002 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5003 			reg = CSR_READ(sc, WMREG_STATUS);
5004 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
5005 				break;
5006 			delay(100);
5007 		}
5008 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5009 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
5010 			    "complete\n", device_xname(sc->sc_dev), __func__);
5011 		}
5012 		break;
5013 	default:
5014 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5015 		    __func__);
5016 		break;
5017 	}
5018 
5019 	reg &= ~STATUS_LAN_INIT_DONE;
5020 	CSR_WRITE(sc, WMREG_STATUS, reg);
5021 }
5022 
5023 void
5024 wm_get_cfg_done(struct wm_softc *sc)
5025 {
5026 	int mask;
5027 	uint32_t reg;
5028 	int i;
5029 
5030 	/* wait for eeprom to reload */
5031 	switch (sc->sc_type) {
5032 	case WM_T_82542_2_0:
5033 	case WM_T_82542_2_1:
5034 		/* null */
5035 		break;
5036 	case WM_T_82543:
5037 	case WM_T_82544:
5038 	case WM_T_82540:
5039 	case WM_T_82545:
5040 	case WM_T_82545_3:
5041 	case WM_T_82546:
5042 	case WM_T_82546_3:
5043 	case WM_T_82541:
5044 	case WM_T_82541_2:
5045 	case WM_T_82547:
5046 	case WM_T_82547_2:
5047 	case WM_T_82573:
5048 	case WM_T_82574:
5049 	case WM_T_82583:
5050 		/* generic */
5051 		delay(10*1000);
5052 		break;
5053 	case WM_T_80003:
5054 	case WM_T_82571:
5055 	case WM_T_82572:
5056 	case WM_T_82575:
5057 	case WM_T_82576:
5058 	case WM_T_82580:
5059 	case WM_T_82580ER:
5060 	case WM_T_I350:
5061 	case WM_T_I354:
5062 	case WM_T_I210:
5063 	case WM_T_I211:
5064 		if (sc->sc_type == WM_T_82571) {
5065 			/* Only 82571 shares port 0 */
5066 			mask = EEMNGCTL_CFGDONE_0;
5067 		} else
5068 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5069 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5070 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5071 				break;
5072 			delay(1000);
5073 		}
5074 		if (i >= WM_PHY_CFG_TIMEOUT) {
5075 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5076 				device_xname(sc->sc_dev), __func__));
5077 		}
5078 		break;
5079 	case WM_T_ICH8:
5080 	case WM_T_ICH9:
5081 	case WM_T_ICH10:
5082 	case WM_T_PCH:
5083 	case WM_T_PCH2:
5084 	case WM_T_PCH_LPT:
5085 		delay(10*1000);
5086 		if (sc->sc_type >= WM_T_ICH10)
5087 			wm_lan_init_done(sc);
5088 		else
5089 			wm_get_auto_rd_done(sc);
5090 
5091 		reg = CSR_READ(sc, WMREG_STATUS);
5092 		if ((reg & STATUS_PHYRA) != 0)
5093 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5094 		break;
5095 	default:
5096 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5097 		    __func__);
5098 		break;
5099 	}
5100 }
5101 
5102 /*
5103  * wm_acquire_eeprom:
5104  *
5105  *	Perform the EEPROM handshake required on some chips.
5106  */
5107 static int
5108 wm_acquire_eeprom(struct wm_softc *sc)
5109 {
5110 	uint32_t reg;
5111 	int x;
5112 	int ret = 0;
5113 
5114 	/* Flash-type NVM needs no handshake; always succeeds. */
5115 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5116 		return 0;
5117 
5118 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5119 		ret = wm_get_swfwhw_semaphore(sc);
5120 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5121 		/* this will also do wm_get_swsm_semaphore() if needed */
5122 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5123 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5124 		ret = wm_get_swsm_semaphore(sc);
5125 	}
5126 
5127 	if (ret) {
5128 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5129 			__func__);
5130 		return 1;
5131 	}
5132 
5133 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5134 		reg = CSR_READ(sc, WMREG_EECD);
5135 
5136 		/* Request EEPROM access. */
5137 		reg |= EECD_EE_REQ;
5138 		CSR_WRITE(sc, WMREG_EECD, reg);
5139 
		/* ...and wait for it to be granted. */
5141 		for (x = 0; x < 1000; x++) {
5142 			reg = CSR_READ(sc, WMREG_EECD);
5143 			if (reg & EECD_EE_GNT)
5144 				break;
5145 			delay(5);
5146 		}
5147 		if ((reg & EECD_EE_GNT) == 0) {
5148 			aprint_error_dev(sc->sc_dev,
5149 			    "could not acquire EEPROM GNT\n");
5150 			reg &= ~EECD_EE_REQ;
5151 			CSR_WRITE(sc, WMREG_EECD, reg);
5152 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5153 				wm_put_swfwhw_semaphore(sc);
5154 			if (sc->sc_flags & WM_F_SWFW_SYNC)
5155 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5156 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5157 				wm_put_swsm_semaphore(sc);
5158 			return 1;
5159 		}
5160 	}
5161 
5162 	return 0;
5163 }
5164 
5165 /*
5166  * wm_release_eeprom:
5167  *
5168  *	Release the EEPROM mutex.
5169  */
5170 static void
5171 wm_release_eeprom(struct wm_softc *sc)
5172 {
5173 	uint32_t reg;
5174 
	/* Flash-based NVM: nothing was acquired, so nothing to release. */
5176 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5177 		return;
5178 
5179 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5180 		reg = CSR_READ(sc, WMREG_EECD);
5181 		reg &= ~EECD_EE_REQ;
5182 		CSR_WRITE(sc, WMREG_EECD, reg);
5183 	}
5184 
5185 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5186 		wm_put_swfwhw_semaphore(sc);
5187 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5188 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5189 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5190 		wm_put_swsm_semaphore(sc);
5191 }
5192 
5193 /*
5194  * wm_eeprom_sendbits:
5195  *
5196  *	Send a series of bits to the EEPROM.
5197  */
5198 static void
5199 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5200 {
5201 	uint32_t reg;
5202 	int x;
5203 
5204 	reg = CSR_READ(sc, WMREG_EECD);
5205 
5206 	for (x = nbits; x > 0; x--) {
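		/*
		 * MSB first: place each bit on DI, then pulse SK high and
		 * low with ~2us of setup/hold around each edge.
		 */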
5207 		if (bits & (1U << (x - 1)))
5208 			reg |= EECD_DI;
5209 		else
5210 			reg &= ~EECD_DI;
5211 		CSR_WRITE(sc, WMREG_EECD, reg);
5212 		delay(2);
5213 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5214 		delay(2);
5215 		CSR_WRITE(sc, WMREG_EECD, reg);
5216 		delay(2);
5217 	}
5218 }
5219 
5220 /*
5221  * wm_eeprom_recvbits:
5222  *
5223  *	Receive a series of bits from the EEPROM.
5224  */
5225 static void
5226 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5227 {
5228 	uint32_t reg, val;
5229 	int x;
5230 
5231 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5232 
5233 	val = 0;
5234 	for (x = nbits; x > 0; x--) {
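		/*
		 * Raise SK, sample DO while the clock is high, then drop
		 * SK again; bits arrive MSB first.
		 */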
5235 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5236 		delay(2);
5237 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5238 			val |= (1U << (x - 1));
5239 		CSR_WRITE(sc, WMREG_EECD, reg);
5240 		delay(2);
5241 	}
5242 	*valp = val;
5243 }
5244 
5245 /*
5246  * wm_read_eeprom_uwire:
5247  *
5248  *	Read a word from the EEPROM using the MicroWire protocol.
5249  */
5250 static int
5251 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5252 {
5253 	uint32_t reg, val;
5254 	int i;
5255 
5256 	for (i = 0; i < wordcnt; i++) {
5257 		/* Clear SK and DI. */
5258 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5259 		CSR_WRITE(sc, WMREG_EECD, reg);
5260 
5261 		/*
5262 		 * XXX: workaround for a bug in qemu-0.12.x and prior
5263 		 * and Xen.
5264 		 *
5265 		 * We use this workaround only for 82540 because qemu's
5266 		 * e1000 act as 82540.
5267 		 */
5268 		if (sc->sc_type == WM_T_82540) {
5269 			reg |= EECD_SK;
5270 			CSR_WRITE(sc, WMREG_EECD, reg);
5271 			reg &= ~EECD_SK;
5272 			CSR_WRITE(sc, WMREG_EECD, reg);
5273 			delay(2);
5274 		}
5275 		/* XXX: end of workaround */
5276 
5277 		/* Set CHIP SELECT. */
5278 		reg |= EECD_CS;
5279 		CSR_WRITE(sc, WMREG_EECD, reg);
5280 		delay(2);
5281 
5282 		/* Shift in the READ command. */
5283 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5284 
5285 		/* Shift in address. */
5286 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5287 
5288 		/* Shift out the data. */
5289 		wm_eeprom_recvbits(sc, &val, 16);
5290 		data[i] = val & 0xffff;
5291 
5292 		/* Clear CHIP SELECT. */
5293 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5294 		CSR_WRITE(sc, WMREG_EECD, reg);
5295 		delay(2);
5296 	}
5297 
5298 	return 0;
5299 }
5300 
5301 /*
5302  * wm_spi_eeprom_ready:
5303  *
5304  *	Wait for a SPI EEPROM to be ready for commands.
5305  */
5306 static int
5307 wm_spi_eeprom_ready(struct wm_softc *sc)
5308 {
5309 	uint32_t val;
5310 	int usec;
5311 
5312 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
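		/*
		 * Issue the read-status (RDSR) opcode and wait for the
		 * SPI_SR_RDY bit to clear; the part stays busy for a
		 * while after an internal write cycle.
		 */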
5313 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5314 		wm_eeprom_recvbits(sc, &val, 8);
5315 		if ((val & SPI_SR_RDY) == 0)
5316 			break;
5317 	}
5318 	if (usec >= SPI_MAX_RETRIES) {
5319 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5320 		return 1;
5321 	}
5322 	return 0;
5323 }
5324 
5325 /*
5326  * wm_read_eeprom_spi:
5327  *
5328  *	Read a work from the EEPROM using the SPI protocol.
5329  */
5330 static int
5331 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5332 {
5333 	uint32_t reg, val;
5334 	int i;
5335 	uint8_t opc;
5336 
5337 	/* Clear SK and CS. */
5338 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5339 	CSR_WRITE(sc, WMREG_EECD, reg);
5340 	delay(2);
5341 
5342 	if (wm_spi_eeprom_ready(sc))
5343 		return 1;
5344 
5345 	/* Toggle CS to flush commands. */
5346 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5347 	delay(2);
5348 	CSR_WRITE(sc, WMREG_EECD, reg);
5349 	delay(2);
5350 
5351 	opc = SPI_OPC_READ;
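	/*
	 * Small parts with 8-bit addressing encode address bit A8 in the
	 * opcode itself.
	 */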
5352 	if (sc->sc_ee_addrbits == 8 && word >= 128)
5353 		opc |= SPI_OPC_A8;
5354 
5355 	wm_eeprom_sendbits(sc, opc, 8);
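	/* SPI addresses are byte addresses; convert the word offset. */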
5356 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5357 
5358 	for (i = 0; i < wordcnt; i++) {
5359 		wm_eeprom_recvbits(sc, &val, 16);
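		/* The 16-bit word arrives in the opposite byte order; swap it. */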
5360 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5361 	}
5362 
5363 	/* Raise CS and clear SK. */
5364 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5365 	CSR_WRITE(sc, WMREG_EECD, reg);
5366 	delay(2);
5367 
5368 	return 0;
5369 }
5370 
5371 #define NVM_CHECKSUM			0xBABA
5372 #define EEPROM_SIZE			0x0040
5373 #define NVM_COMPAT			0x0003
5374 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
5375 #define NVM_FUTURE_INIT_WORD1			0x0019
5376 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
5377 
5378 /*
5379  * wm_validate_eeprom_checksum
5380  *
5381  * The checksum is defined as the sum of the first 64 (16 bit) words.
5382  */
5383 static int
5384 wm_validate_eeprom_checksum(struct wm_softc *sc)
5385 {
5386 	uint16_t checksum;
5387 	uint16_t eeprom_data;
5388 #ifdef WM_DEBUG
5389 	uint16_t csum_wordaddr, valid_checksum;
5390 #endif
5391 	int i;
5392 
5393 	checksum = 0;
5394 
5395 	/* Don't check for I211 */
5396 	if (sc->sc_type == WM_T_I211)
5397 		return 0;
5398 
5399 #ifdef WM_DEBUG
5400 	if (sc->sc_type == WM_T_PCH_LPT) {
5401 		csum_wordaddr = NVM_COMPAT;
5402 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5403 	} else {
5404 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5405 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5406 	}
5407 
5408 	/* Dump EEPROM image for debug */
5409 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5410 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5411 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5412 		wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5413 		if ((eeprom_data & valid_checksum) == 0) {
5414 			DPRINTF(WM_DEBUG_NVM,
5415 			    ("%s: NVM need to be updated (%04x != %04x)\n",
5416 				device_xname(sc->sc_dev), eeprom_data,
5417 				    valid_checksum));
5418 		}
5419 	}
5420 
5421 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
5422 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5423 		for (i = 0; i < EEPROM_SIZE; i++) {
5424 			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5425 				printf("XX ");
5426 			else
5427 				printf("%04x ", eeprom_data);
5428 			if (i % 8 == 7)
5429 				printf("\n");
5430 		}
5431 	}
5432 
5433 #endif /* WM_DEBUG */
5434 
5435 	for (i = 0; i < EEPROM_SIZE; i++) {
5436 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5437 			return 1;
5438 		checksum += eeprom_data;
5439 	}
5440 
5441 	if (checksum != (uint16_t) NVM_CHECKSUM) {
5442 #ifdef WM_DEBUG
5443 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5444 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5445 #endif
5446 	}
5447 
5448 	return 0;
5449 }
5450 
5451 /*
5452  * wm_read_eeprom:
5453  *
5454  *	Read data from the serial EEPROM.
5455  */
5456 static int
5457 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5458 {
5459 	int rv;
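
	/*
	 * Dispatch on NVM type: ICH8 variants keep the NVM in flash,
	 * newer MACs expose it through the EERD register, and older
	 * parts are bit-banged using the SPI or MicroWire protocol.
	 */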
5460 
5461 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
5462 		return 1;
5463 
5464 	if (wm_acquire_eeprom(sc))
5465 		return 1;
5466 
5467 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5468 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5469 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5470 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5471 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5472 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5473 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
5474 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5475 	else
5476 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5477 
5478 	wm_release_eeprom(sc);
5479 	return rv;
5480 }
5481 
5482 static int
5483 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5484     uint16_t *data)
5485 {
5486 	int i, eerd = 0;
5487 	int error = 0;
5488 
5489 	for (i = 0; i < wordcnt; i++) {
5490 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5491 
5492 		CSR_WRITE(sc, WMREG_EERD, eerd);
5493 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5494 		if (error != 0)
5495 			break;
5496 
5497 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5498 	}
5499 
5500 	return error;
5501 }
5502 
5503 static int
5504 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5505 {
5506 	uint32_t attempts = 100000;
5507 	uint32_t i, reg = 0;
5508 	int32_t done = -1;
5509 
5510 	for (i = 0; i < attempts; i++) {
5511 		reg = CSR_READ(sc, rw);
5512 
5513 		if (reg & EERD_DONE) {
5514 			done = 0;
5515 			break;
5516 		}
5517 		delay(5);
5518 	}
5519 
5520 	return done;
5521 }
5522 
5523 static int
5524 wm_check_alt_mac_addr(struct wm_softc *sc)
5525 {
5526 	uint16_t myea[ETHER_ADDR_LEN / 2];
5527 	uint16_t offset = EEPROM_OFF_MACADDR;
5528 
5529 	/* Try to read alternative MAC address pointer */
5530 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5531 		return -1;
5532 
5533 	/* Check pointer */
5534 	if (offset == 0xffff)
5535 		return -1;
5536 
5537 	/*
5538 	 * Check whether alternative MAC address is valid or not.
5539 	 * Some cards have non 0xffff pointer but those don't use
5540 	 * alternative MAC address in reality.
5541 	 *
5542 	 * Check whether the broadcast bit is set or not.
5543 	 */
5544 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5545 		if (((myea[0] & 0xff) & 0x01) == 0)
5546 			return 0; /* found! */
5547 
5548 	/* not found */
5549 	return -1;
5550 }
5551 
5552 static int
5553 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5554 {
5555 	uint16_t myea[ETHER_ADDR_LEN / 2];
5556 	uint16_t offset = EEPROM_OFF_MACADDR;
5557 	int do_invert = 0;
5558 
5559 	switch (sc->sc_type) {
5560 	case WM_T_82580:
5561 	case WM_T_82580ER:
5562 	case WM_T_I350:
5563 	case WM_T_I354:
5564 		switch (sc->sc_funcid) {
5565 		case 0:
5566 			/* default value (== EEPROM_OFF_MACADDR) */
5567 			break;
5568 		case 1:
5569 			offset = EEPROM_OFF_LAN1;
5570 			break;
5571 		case 2:
5572 			offset = EEPROM_OFF_LAN2;
5573 			break;
5574 		case 3:
5575 			offset = EEPROM_OFF_LAN3;
5576 			break;
5577 		default:
5578 			goto bad;
5579 			/* NOTREACHED */
5580 			break;
5581 		}
5582 		break;
5583 	case WM_T_82571:
5584 	case WM_T_82575:
5585 	case WM_T_82576:
5586 	case WM_T_80003:
5587 	case WM_T_I210:
5588 	case WM_T_I211:
5589 		if (wm_check_alt_mac_addr(sc) != 0) {
5590 			/* reset the offset to LAN0 */
5591 			offset = EEPROM_OFF_MACADDR;
5592 			if ((sc->sc_funcid & 0x01) == 1)
5593 				do_invert = 1;
5594 			goto do_read;
5595 		}
5596 		switch (sc->sc_funcid) {
5597 		case 0:
5598 			/*
5599 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5600 			 * itself.
5601 			 */
5602 			break;
5603 		case 1:
5604 			offset += EEPROM_OFF_MACADDR_LAN1;
5605 			break;
5606 		case 2:
5607 			offset += EEPROM_OFF_MACADDR_LAN2;
5608 			break;
5609 		case 3:
5610 			offset += EEPROM_OFF_MACADDR_LAN3;
5611 			break;
5612 		default:
5613 			goto bad;
5614 			/* NOTREACHED */
5615 			break;
5616 		}
5617 		break;
5618 	default:
5619 		if ((sc->sc_funcid & 0x01) == 1)
5620 			do_invert = 1;
5621 		break;
5622 	}
5623 
5624  do_read:
5625 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5626 		myea) != 0) {
5627 		goto bad;
5628 	}
5629 
5630 	enaddr[0] = myea[0] & 0xff;
5631 	enaddr[1] = myea[0] >> 8;
5632 	enaddr[2] = myea[1] & 0xff;
5633 	enaddr[3] = myea[1] >> 8;
5634 	enaddr[4] = myea[2] & 0xff;
5635 	enaddr[5] = myea[2] >> 8;
5636 
5637 	/*
5638 	 * Toggle the LSB of the MAC address on the second port
5639 	 * of some dual port cards.
5640 	 */
5641 	if (do_invert != 0)
5642 		enaddr[5] ^= 1;
5643 
5644 	return 0;
5645 
5646  bad:
5647 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5648 
5649 	return -1;
5650 }
5651 
5652 /*
5653  * wm_add_rxbuf:
5654  *
5655  *	Add a receive buffer to the indiciated descriptor.
5656  */
5657 static int
5658 wm_add_rxbuf(struct wm_softc *sc, int idx)
5659 {
5660 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5661 	struct mbuf *m;
5662 	int error;
5663 
5664 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5665 	if (m == NULL)
5666 		return ENOBUFS;
5667 
5668 	MCLGET(m, M_DONTWAIT);
5669 	if ((m->m_flags & M_EXT) == 0) {
5670 		m_freem(m);
5671 		return ENOBUFS;
5672 	}
5673 
5674 	if (rxs->rxs_mbuf != NULL)
5675 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5676 
5677 	rxs->rxs_mbuf = m;
5678 
5679 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5680 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5681 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
5682 	if (error) {
5683 		/* XXX XXX XXX */
5684 		aprint_error_dev(sc->sc_dev,
5685 		    "unable to load rx DMA map %d, error = %d\n",
5686 		    idx, error);
5687 		panic("wm_add_rxbuf");
5688 	}
5689 
5690 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5691 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5692 
5693 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5694 		if ((sc->sc_rctl & RCTL_EN) != 0)
5695 			WM_INIT_RXDESC(sc, idx);
5696 	} else
5697 		WM_INIT_RXDESC(sc, idx);
5698 
5699 	return 0;
5700 }
5701 
5702 /*
5703  * wm_set_ral:
5704  *
5705  *	Set an entery in the receive address list.
5706  */
5707 static void
5708 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5709 {
5710 	uint32_t ral_lo, ral_hi;
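
	/*
	 * A NULL enaddr clears the slot: with RAL_AV left unset, the
	 * entry is ignored by the receive address filter.
	 */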
5711 
5712 	if (enaddr != NULL) {
5713 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5714 		    (enaddr[3] << 24);
5715 		ral_hi = enaddr[4] | (enaddr[5] << 8);
5716 		ral_hi |= RAL_AV;
5717 	} else {
5718 		ral_lo = 0;
5719 		ral_hi = 0;
5720 	}
5721 
5722 	if (sc->sc_type >= WM_T_82544) {
5723 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5724 		    ral_lo);
5725 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5726 		    ral_hi);
5727 	} else {
5728 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5729 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5730 	}
5731 }
5732 
5733 /*
5734  * wm_mchash:
5735  *
5736  *	Compute the hash of the multicast address for the 4096-bit
5737  *	multicast filter.
5738  */
5739 static uint32_t
5740 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5741 {
5742 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5743 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5744 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5745 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5746 	uint32_t hash;
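
	/*
	 * The hash is built from the last two bytes of the address; the
	 * per-type shift tables above pick which bits, indexed by
	 * sc_mchash_type.
	 */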
5747 
5748 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5749 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5750 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5751 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5752 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5753 		return (hash & 0x3ff);
5754 	}
5755 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5756 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5757 
5758 	return (hash & 0xfff);
5759 }
5760 
5761 /*
5762  * wm_set_filter:
5763  *
5764  *	Set up the receive filter.
5765  */
5766 static void
5767 wm_set_filter(struct wm_softc *sc)
5768 {
5769 	struct ethercom *ec = &sc->sc_ethercom;
5770 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5771 	struct ether_multi *enm;
5772 	struct ether_multistep step;
5773 	bus_addr_t mta_reg;
5774 	uint32_t hash, reg, bit;
5775 	int i, size;
5776 
5777 	if (sc->sc_type >= WM_T_82544)
5778 		mta_reg = WMREG_CORDOVA_MTA;
5779 	else
5780 		mta_reg = WMREG_MTA;
5781 
5782 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5783 
5784 	if (ifp->if_flags & IFF_BROADCAST)
5785 		sc->sc_rctl |= RCTL_BAM;
5786 	if (ifp->if_flags & IFF_PROMISC) {
5787 		sc->sc_rctl |= RCTL_UPE;
5788 		goto allmulti;
5789 	}
5790 
5791 	/*
5792 	 * Set the station address in the first RAL slot, and
5793 	 * clear the remaining slots.
5794 	 */
5795 	if (sc->sc_type == WM_T_ICH8)
5796 		size = WM_RAL_TABSIZE_ICH8 -1;
5797 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5798 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5799 	    || (sc->sc_type == WM_T_PCH_LPT))
5800 		size = WM_RAL_TABSIZE_ICH8;
5801 	else if (sc->sc_type == WM_T_82575)
5802 		size = WM_RAL_TABSIZE_82575;
5803 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5804 		size = WM_RAL_TABSIZE_82576;
5805 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5806 		size = WM_RAL_TABSIZE_I350;
5807 	else
5808 		size = WM_RAL_TABSIZE;
5809 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5810 	for (i = 1; i < size; i++)
5811 		wm_set_ral(sc, NULL, i);
5812 
5813 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5814 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5815 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5816 		size = WM_ICH8_MC_TABSIZE;
5817 	else
5818 		size = WM_MC_TABSIZE;
5819 	/* Clear out the multicast table. */
5820 	for (i = 0; i < size; i++)
5821 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5822 
5823 	ETHER_FIRST_MULTI(step, ec, enm);
5824 	while (enm != NULL) {
5825 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5826 			/*
5827 			 * We must listen to a range of multicast addresses.
5828 			 * For now, just accept all multicasts, rather than
5829 			 * trying to set only those filter bits needed to match
5830 			 * the range.  (At this time, the only use of address
5831 			 * ranges is for IP multicast routing, for which the
5832 			 * range is big enough to require all bits set.)
5833 			 */
5834 			goto allmulti;
5835 		}
5836 
5837 		hash = wm_mchash(sc, enm->enm_addrlo);
5838 
5839 		reg = (hash >> 5);
5840 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5841 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5842 		    || (sc->sc_type == WM_T_PCH2)
5843 		    || (sc->sc_type == WM_T_PCH_LPT))
5844 			reg &= 0x1f;
5845 		else
5846 			reg &= 0x7f;
5847 		bit = hash & 0x1f;
5848 
5849 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5850 		hash |= 1U << bit;
5851 
5852 		/* XXX Hardware bug?? */
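		/*
		 * This looks like the 82544 MTA errata where writing one
		 * MTA register can disturb its neighbour, so the neighbour
		 * is read first and written back afterwards (an
		 * interpretation; not confirmed by documentation here).
		 */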
5853 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5854 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5855 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5856 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5857 		} else
5858 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5859 
5860 		ETHER_NEXT_MULTI(step, enm);
5861 	}
5862 
5863 	ifp->if_flags &= ~IFF_ALLMULTI;
5864 	goto setit;
5865 
5866  allmulti:
5867 	ifp->if_flags |= IFF_ALLMULTI;
5868 	sc->sc_rctl |= RCTL_MPE;
5869 
5870  setit:
5871 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5872 }
5873 
5874 /*
5875  * wm_tbi_mediainit:
5876  *
5877  *	Initialize media for use on 1000BASE-X devices.
5878  */
5879 static void
5880 wm_tbi_mediainit(struct wm_softc *sc)
5881 {
5882 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5883 	const char *sep = "";
5884 
5885 	if (sc->sc_type < WM_T_82543)
5886 		sc->sc_tipg = TIPG_WM_DFLT;
5887 	else
5888 		sc->sc_tipg = TIPG_LG_DFLT;
5889 
5890 	sc->sc_tbi_anegticks = 5;
5891 
5892 	/* Initialize our media structures */
5893 	sc->sc_mii.mii_ifp = ifp;
5894 
5895 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5896 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5897 	    wm_tbi_mediastatus);
5898 
5899 	/*
5900 	 * SWD Pins:
5901 	 *
5902 	 *	0 = Link LED (output)
5903 	 *	1 = Loss Of Signal (input)
5904 	 */
5905 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5906 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5907 
5908 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5909 
5910 #define	ADD(ss, mm, dd)							\
5911 do {									\
5912 	aprint_normal("%s%s", sep, ss);					\
5913 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5914 	sep = ", ";							\
5915 } while (/*CONSTCOND*/0)
5916 
5917 	aprint_normal_dev(sc->sc_dev, "");
5918 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5919 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5920 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5921 	aprint_normal("\n");
5922 
5923 #undef ADD
5924 
5925 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5926 }
5927 
5928 /*
5929  * wm_tbi_mediastatus:	[ifmedia interface function]
5930  *
5931  *	Get the current interface media status on a 1000BASE-X device.
5932  */
5933 static void
5934 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5935 {
5936 	struct wm_softc *sc = ifp->if_softc;
5937 	uint32_t ctrl, status;
5938 
5939 	ifmr->ifm_status = IFM_AVALID;
5940 	ifmr->ifm_active = IFM_ETHER;
5941 
5942 	status = CSR_READ(sc, WMREG_STATUS);
5943 	if ((status & STATUS_LU) == 0) {
5944 		ifmr->ifm_active |= IFM_NONE;
5945 		return;
5946 	}
5947 
5948 	ifmr->ifm_status |= IFM_ACTIVE;
5949 	ifmr->ifm_active |= IFM_1000_SX;
5950 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5951 		ifmr->ifm_active |= IFM_FDX;
5952 	ctrl = CSR_READ(sc, WMREG_CTRL);
5953 	if (ctrl & CTRL_RFCE)
5954 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5955 	if (ctrl & CTRL_TFCE)
5956 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5957 }
5958 
5959 /*
5960  * wm_tbi_mediachange:	[ifmedia interface function]
5961  *
5962  *	Set hardware to newly-selected media on a 1000BASE-X device.
5963  */
5964 static int
5965 wm_tbi_mediachange(struct ifnet *ifp)
5966 {
5967 	struct wm_softc *sc = ifp->if_softc;
5968 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5969 	uint32_t status;
5970 	int i;
5971 
5972 	sc->sc_txcw = 0;
5973 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5974 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5975 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5976 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5977 		sc->sc_txcw |= TXCW_ANE;
5978 	} else {
5979 		/*
5980 		 * If autonegotiation is turned off, force link up and turn on
5981 		 * full duplex
5982 		 */
5983 		sc->sc_txcw &= ~TXCW_ANE;
5984 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
5985 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
5986 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5987 		delay(1000);
5988 	}
5989 
5990 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
5991 		    device_xname(sc->sc_dev),sc->sc_txcw));
5992 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
5993 	delay(10000);
5994 
5995 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
5996 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
5997 
5998 	/*
5999 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
6000 	 * optics detect a signal, 0 if they don't.
6001 	 */
6002 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6003 		/* Have signal; wait for the link to come up. */
6004 
6005 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6006 			/*
6007 			 * Reset the link, and let autonegotiation do its thing
6008 			 */
6009 			sc->sc_ctrl |= CTRL_LRST;
6010 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6011 			delay(1000);
6012 			sc->sc_ctrl &= ~CTRL_LRST;
6013 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6014 			delay(1000);
6015 		}
6016 
6017 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6018 			delay(10000);
6019 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6020 				break;
6021 		}
6022 
6023 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
6024 			    device_xname(sc->sc_dev),i));
6025 
6026 		status = CSR_READ(sc, WMREG_STATUS);
6027 		DPRINTF(WM_DEBUG_LINK,
6028 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6029 			device_xname(sc->sc_dev),status, STATUS_LU));
6030 		if (status & STATUS_LU) {
6031 			/* Link is up. */
6032 			DPRINTF(WM_DEBUG_LINK,
6033 			    ("%s: LINK: set media -> link up %s\n",
6034 			    device_xname(sc->sc_dev),
6035 			    (status & STATUS_FD) ? "FDX" : "HDX"));
6036 
6037 			/*
6038 			 * NOTE: CTRL will update TFCE and RFCE automatically,
6039 			 * so we should update sc->sc_ctrl
6040 			 */
6041 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6042 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6043 			sc->sc_fcrtl &= ~FCRTL_XONE;
6044 			if (status & STATUS_FD)
6045 				sc->sc_tctl |=
6046 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6047 			else
6048 				sc->sc_tctl |=
6049 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6050 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6051 				sc->sc_fcrtl |= FCRTL_XONE;
6052 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6053 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6054 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
6055 				      sc->sc_fcrtl);
6056 			sc->sc_tbi_linkup = 1;
6057 		} else {
6058 			if (i == WM_LINKUP_TIMEOUT)
6059 				wm_check_for_link(sc);
6060 			/* Link is down. */
6061 			DPRINTF(WM_DEBUG_LINK,
6062 			    ("%s: LINK: set media -> link down\n",
6063 			    device_xname(sc->sc_dev)));
6064 			sc->sc_tbi_linkup = 0;
6065 		}
6066 	} else {
6067 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6068 		    device_xname(sc->sc_dev)));
6069 		sc->sc_tbi_linkup = 0;
6070 	}
6071 
6072 	wm_tbi_set_linkled(sc);
6073 
6074 	return 0;
6075 }
6076 
6077 /*
6078  * wm_tbi_set_linkled:
6079  *
6080  *	Update the link LED on 1000BASE-X devices.
6081  */
6082 static void
6083 wm_tbi_set_linkled(struct wm_softc *sc)
6084 {
6085 
6086 	if (sc->sc_tbi_linkup)
6087 		sc->sc_ctrl |= CTRL_SWDPIN(0);
6088 	else
6089 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6090 
	/* 82540 or newer devices are active-low */
6092 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6093 
6094 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6095 }
6096 
6097 /*
6098  * wm_tbi_check_link:
6099  *
6100  *	Check the link on 1000BASE-X devices.
6101  */
6102 static void
6103 wm_tbi_check_link(struct wm_softc *sc)
6104 {
6105 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6106 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6107 	uint32_t status;
6108 
6109 	status = CSR_READ(sc, WMREG_STATUS);
6110 
6111 	/* XXX is this needed? */
6112 	(void)CSR_READ(sc, WMREG_RXCW);
6113 	(void)CSR_READ(sc, WMREG_CTRL);
6114 
6115 	/* set link status */
6116 	if ((status & STATUS_LU) == 0) {
6117 		DPRINTF(WM_DEBUG_LINK,
6118 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6119 		sc->sc_tbi_linkup = 0;
6120 	} else if (sc->sc_tbi_linkup == 0) {
6121 		DPRINTF(WM_DEBUG_LINK,
6122 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6123 		    (status & STATUS_FD) ? "FDX" : "HDX"));
6124 		sc->sc_tbi_linkup = 1;
6125 	}
6126 
6127 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6128 	    && ((status & STATUS_LU) == 0)) {
6129 		sc->sc_tbi_linkup = 0;
6130 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6131 			/* RXCFG storm! */
6132 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6133 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6134 			wm_init(ifp);
6135 			ifp->if_start(ifp);
6136 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6137 			/* If the timer expired, retry autonegotiation */
6138 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6139 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6140 				sc->sc_tbi_ticks = 0;
6141 				/*
6142 				 * Reset the link, and let autonegotiation do
6143 				 * its thing
6144 				 */
6145 				sc->sc_ctrl |= CTRL_LRST;
6146 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6147 				delay(1000);
6148 				sc->sc_ctrl &= ~CTRL_LRST;
6149 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6150 				delay(1000);
6151 				CSR_WRITE(sc, WMREG_TXCW,
6152 				    sc->sc_txcw & ~TXCW_ANE);
6153 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6154 			}
6155 		}
6156 	}
6157 
6158 	wm_tbi_set_linkled(sc);
6159 }
6160 
6161 /*
6162  * wm_gmii_reset:
6163  *
6164  *	Reset the PHY.
6165  */
6166 static void
6167 wm_gmii_reset(struct wm_softc *sc)
6168 {
6169 	uint32_t reg;
6170 	int rv;
6171 
6172 	/* get phy semaphore */
6173 	switch (sc->sc_type) {
6174 	case WM_T_82571:
6175 	case WM_T_82572:
6176 	case WM_T_82573:
6177 	case WM_T_82574:
6178 	case WM_T_82583:
6179 		 /* XXX should get sw semaphore, too */
6180 		rv = wm_get_swsm_semaphore(sc);
6181 		break;
6182 	case WM_T_82575:
6183 	case WM_T_82576:
6184 	case WM_T_82580:
6185 	case WM_T_82580ER:
6186 	case WM_T_I350:
6187 	case WM_T_I354:
6188 	case WM_T_I210:
6189 	case WM_T_I211:
6190 	case WM_T_80003:
6191 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6192 		break;
6193 	case WM_T_ICH8:
6194 	case WM_T_ICH9:
6195 	case WM_T_ICH10:
6196 	case WM_T_PCH:
6197 	case WM_T_PCH2:
6198 	case WM_T_PCH_LPT:
6199 		rv = wm_get_swfwhw_semaphore(sc);
6200 		break;
6201 	default:
		/* nothing to do */
6203 		rv = 0;
6204 		break;
6205 	}
6206 	if (rv != 0) {
6207 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6208 		    __func__);
6209 		return;
6210 	}
6211 
6212 	switch (sc->sc_type) {
6213 	case WM_T_82542_2_0:
6214 	case WM_T_82542_2_1:
6215 		/* null */
6216 		break;
6217 	case WM_T_82543:
6218 		/*
6219 		 * With 82543, we need to force speed and duplex on the MAC
6220 		 * equal to what the PHY speed and duplex configuration is.
6221 		 * In addition, we need to perform a hardware reset on the PHY
6222 		 * to take it out of reset.
6223 		 */
6224 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6225 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6226 
6227 		/* The PHY reset pin is active-low. */
6228 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6229 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6230 		    CTRL_EXT_SWDPIN(4));
6231 		reg |= CTRL_EXT_SWDPIO(4);
6232 
6233 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6234 		delay(10*1000);
6235 
6236 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6237 		delay(150);
6238 #if 0
6239 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6240 #endif
6241 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6242 		break;
6243 	case WM_T_82544:	/* reset 10000us */
6244 	case WM_T_82540:
6245 	case WM_T_82545:
6246 	case WM_T_82545_3:
6247 	case WM_T_82546:
6248 	case WM_T_82546_3:
6249 	case WM_T_82541:
6250 	case WM_T_82541_2:
6251 	case WM_T_82547:
6252 	case WM_T_82547_2:
6253 	case WM_T_82571:	/* reset 100us */
6254 	case WM_T_82572:
6255 	case WM_T_82573:
6256 	case WM_T_82574:
6257 	case WM_T_82575:
6258 	case WM_T_82576:
6259 	case WM_T_82580:
6260 	case WM_T_82580ER:
6261 	case WM_T_I350:
6262 	case WM_T_I354:
6263 	case WM_T_I210:
6264 	case WM_T_I211:
6265 	case WM_T_82583:
6266 	case WM_T_80003:
6267 		/* generic reset */
6268 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6269 		delay(20000);
6270 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6271 		delay(20000);
6272 
6273 		if ((sc->sc_type == WM_T_82541)
6274 		    || (sc->sc_type == WM_T_82541_2)
6275 		    || (sc->sc_type == WM_T_82547)
6276 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP PHYs are done in igp_reset() */
6278 			/* XXX add code to set LED after phy reset */
6279 		}
6280 		break;
6281 	case WM_T_ICH8:
6282 	case WM_T_ICH9:
6283 	case WM_T_ICH10:
6284 	case WM_T_PCH:
6285 	case WM_T_PCH2:
6286 	case WM_T_PCH_LPT:
6287 		/* generic reset */
6288 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6289 		delay(100);
6290 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6291 		delay(150);
6292 		break;
6293 	default:
6294 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6295 		    __func__);
6296 		break;
6297 	}
6298 
6299 	/* release PHY semaphore */
6300 	switch (sc->sc_type) {
6301 	case WM_T_82571:
6302 	case WM_T_82572:
6303 	case WM_T_82573:
6304 	case WM_T_82574:
6305 	case WM_T_82583:
6306 		 /* XXX should put sw semaphore, too */
6307 		wm_put_swsm_semaphore(sc);
6308 		break;
6309 	case WM_T_82575:
6310 	case WM_T_82576:
6311 	case WM_T_82580:
6312 	case WM_T_82580ER:
6313 	case WM_T_I350:
6314 	case WM_T_I354:
6315 	case WM_T_I210:
6316 	case WM_T_I211:
6317 	case WM_T_80003:
6318 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6319 		break;
6320 	case WM_T_ICH8:
6321 	case WM_T_ICH9:
6322 	case WM_T_ICH10:
6323 	case WM_T_PCH:
6324 	case WM_T_PCH2:
6325 	case WM_T_PCH_LPT:
6326 		wm_put_swfwhw_semaphore(sc);
6327 		break;
6328 	default:
		/* nothing to do */
6330 		rv = 0;
6331 		break;
6332 	}
6333 
6334 	/* get_cfg_done */
6335 	wm_get_cfg_done(sc);
6336 
6337 	/* extra setup */
6338 	switch (sc->sc_type) {
6339 	case WM_T_82542_2_0:
6340 	case WM_T_82542_2_1:
6341 	case WM_T_82543:
6342 	case WM_T_82544:
6343 	case WM_T_82540:
6344 	case WM_T_82545:
6345 	case WM_T_82545_3:
6346 	case WM_T_82546:
6347 	case WM_T_82546_3:
6348 	case WM_T_82541_2:
6349 	case WM_T_82547_2:
6350 	case WM_T_82571:
6351 	case WM_T_82572:
6352 	case WM_T_82573:
6353 	case WM_T_82574:
6354 	case WM_T_82575:
6355 	case WM_T_82576:
6356 	case WM_T_82580:
6357 	case WM_T_82580ER:
6358 	case WM_T_I350:
6359 	case WM_T_I354:
6360 	case WM_T_I210:
6361 	case WM_T_I211:
6362 	case WM_T_82583:
6363 	case WM_T_80003:
6364 		/* null */
6365 		break;
6366 	case WM_T_82541:
6367 	case WM_T_82547:
		/* XXX Actively configure the LED after PHY reset */
6369 		break;
6370 	case WM_T_ICH8:
6371 	case WM_T_ICH9:
6372 	case WM_T_ICH10:
6373 	case WM_T_PCH:
6374 	case WM_T_PCH2:
6375 	case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
6377 		delay(10*1000);
6378 
6379 		if (sc->sc_type == WM_T_PCH)
6380 			wm_hv_phy_workaround_ich8lan(sc);
6381 
6382 		if (sc->sc_type == WM_T_PCH2)
6383 			wm_lv_phy_workaround_ich8lan(sc);
6384 
6385 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6386 			/*
6387 			 * dummy read to clear the phy wakeup bit after lcd
6388 			 * reset
6389 			 */
6390 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6391 		}
6392 
6393 		/*
6394 		 * XXX Configure the LCD with th extended configuration region
6395 		 * in NVM
6396 		 */
6397 
6398 		/* Configure the LCD with the OEM bits in NVM */
6399 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6400 		    || (sc->sc_type == WM_T_PCH_LPT)) {
6401 			/*
6402 			 * Disable LPLU.
6403 			 * XXX It seems that 82567 has LPLU, too.
6404 			 */
6405 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
			reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
6407 			reg |= HV_OEM_BITS_ANEGNOW;
6408 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6409 		}
6410 		break;
6411 	default:
6412 		panic("%s: unknown type\n", __func__);
6413 		break;
6414 	}
6415 }
6416 
6417 /*
6418  * wm_get_phy_id_82575:
6419  *
 * Return the PHY ID, or -1 on failure.
6421  */
6422 static int
6423 wm_get_phy_id_82575(struct wm_softc *sc)
6424 {
6425 	uint32_t reg;
6426 	int phyid = -1;
6427 
6428 	/* XXX */
6429 	if ((sc->sc_flags & WM_F_SGMII) == 0)
6430 		return -1;
6431 
6432 	if (wm_sgmii_uses_mdio(sc)) {
6433 		switch (sc->sc_type) {
6434 		case WM_T_82575:
6435 		case WM_T_82576:
6436 			reg = CSR_READ(sc, WMREG_MDIC);
6437 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6438 			break;
6439 		case WM_T_82580:
6440 		case WM_T_I350:
6441 		case WM_T_I354:
6442 		case WM_T_I210:
6443 		case WM_T_I211:
6444 			reg = CSR_READ(sc, WMREG_MDICNFG);
6445 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6446 			break;
6447 		default:
6448 			return -1;
6449 		}
6450 	}
6451 
6452 	return phyid;
6453 }
6454 
6455 
6456 /*
6457  * wm_gmii_mediainit:
6458  *
6459  *	Initialize media for use on 1000BASE-T devices.
6460  */
6461 static void
6462 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6463 {
6464 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6465 	struct mii_data *mii = &sc->sc_mii;
6466 
6467 	/* We have MII. */
6468 	sc->sc_flags |= WM_F_HAS_MII;
6469 
6470 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
6472 	else
6473 		sc->sc_tipg = TIPG_1000T_DFLT;
6474 
6475 	/*
6476 	 * Let the chip set speed/duplex on its own based on
6477 	 * signals from the PHY.
6478 	 * XXXbouyer - I'm not sure this is right for the 80003,
6479 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6480 	 */
6481 	sc->sc_ctrl |= CTRL_SLU;
6482 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6483 
6484 	/* Initialize our media structures and probe the GMII. */
6485 	mii->mii_ifp = ifp;
6486 
6487 	/*
6488 	 * Determine the PHY access method.
6489 	 *
6490 	 *  For SGMII, use SGMII specific method.
6491 	 *
6492 	 *  For some devices, we can determine the PHY access method
6493 	 * from sc_type.
6494 	 *
6495 	 *  For ICH8 variants, it's difficult to detemine the PHY access
6496 	 * method by sc_type, so use the PCI product ID for some devices.
6497 	 * For other ICH8 variants, try to use igp's method. If the PHY
6498 	 * can't detect, then use bm's method.
6499 	 */
6500 	switch (prodid) {
6501 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6502 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6503 		/* 82577 */
6504 		sc->sc_phytype = WMPHY_82577;
6505 		mii->mii_readreg = wm_gmii_hv_readreg;
6506 		mii->mii_writereg = wm_gmii_hv_writereg;
6507 		break;
6508 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6509 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6510 		/* 82578 */
6511 		sc->sc_phytype = WMPHY_82578;
6512 		mii->mii_readreg = wm_gmii_hv_readreg;
6513 		mii->mii_writereg = wm_gmii_hv_writereg;
6514 		break;
6515 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6516 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6517 		/* 82579 */
6518 		sc->sc_phytype = WMPHY_82579;
6519 		mii->mii_readreg = wm_gmii_hv_readreg;
6520 		mii->mii_writereg = wm_gmii_hv_writereg;
6521 		break;
6522 	case PCI_PRODUCT_INTEL_I217_LM:
6523 	case PCI_PRODUCT_INTEL_I217_V:
6524 	case PCI_PRODUCT_INTEL_I218_LM:
6525 	case PCI_PRODUCT_INTEL_I218_V:
6526 		/* I21[78] */
6527 		mii->mii_readreg = wm_gmii_hv_readreg;
6528 		mii->mii_writereg = wm_gmii_hv_writereg;
6529 		break;
6530 	case PCI_PRODUCT_INTEL_82801I_BM:
6531 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6532 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6533 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6534 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6535 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6536 		/* 82567 */
6537 		sc->sc_phytype = WMPHY_BM;
6538 		mii->mii_readreg = wm_gmii_bm_readreg;
6539 		mii->mii_writereg = wm_gmii_bm_writereg;
6540 		break;
6541 	default:
6542 		if (((sc->sc_flags & WM_F_SGMII) != 0)
		    && !wm_sgmii_uses_mdio(sc)) {
6544 			mii->mii_readreg = wm_sgmii_readreg;
6545 			mii->mii_writereg = wm_sgmii_writereg;
6546 		} else if (sc->sc_type >= WM_T_80003) {
6547 			mii->mii_readreg = wm_gmii_i80003_readreg;
6548 			mii->mii_writereg = wm_gmii_i80003_writereg;
6549 		} else if (sc->sc_type >= WM_T_I210) {
6550 			mii->mii_readreg = wm_gmii_i82544_readreg;
6551 			mii->mii_writereg = wm_gmii_i82544_writereg;
6552 		} else if (sc->sc_type >= WM_T_82580) {
6553 			sc->sc_phytype = WMPHY_82580;
6554 			mii->mii_readreg = wm_gmii_82580_readreg;
6555 			mii->mii_writereg = wm_gmii_82580_writereg;
6556 		} else if (sc->sc_type >= WM_T_82544) {
6557 			mii->mii_readreg = wm_gmii_i82544_readreg;
6558 			mii->mii_writereg = wm_gmii_i82544_writereg;
6559 		} else {
6560 			mii->mii_readreg = wm_gmii_i82543_readreg;
6561 			mii->mii_writereg = wm_gmii_i82543_writereg;
6562 		}
6563 		break;
6564 	}
6565 	mii->mii_statchg = wm_gmii_statchg;
6566 
6567 	wm_gmii_reset(sc);
6568 
6569 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6570 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6571 	    wm_gmii_mediastatus);
6572 
6573 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6574 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6575 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6576 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6577 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6578 			/* Attach only one port */
6579 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6580 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6581 		} else {
6582 			int i, id;
6583 			uint32_t ctrl_ext;
6584 
6585 			id = wm_get_phy_id_82575(sc);
6586 			if (id != -1) {
6587 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6588 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6589 			}
6590 			if ((id == -1)
6591 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6592 				/* Power on sgmii phy if it is disabled */
6593 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6594 				CSR_WRITE(sc, WMREG_CTRL_EXT,
				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
6596 				CSR_WRITE_FLUSH(sc);
6597 				delay(300*1000); /* XXX too long */
6598 
				/* Try PHY addresses 1 through 7 */
6600 				for (i = 1; i < 8; i++)
6601 					mii_attach(sc->sc_dev, &sc->sc_mii,
6602 					    0xffffffff, i, MII_OFFSET_ANY,
6603 					    MIIF_DOPAUSE);
6604 
6605 				/* restore previous sfp cage power state */
6606 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6607 			}
6608 		}
6609 	} else {
6610 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6611 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6612 	}
6613 
6614 	/*
	 * If the MAC is a PCH2 or PCH_LPT and no MII PHY was detected, call
	 * wm_set_mdio_slow_mode_hv() as a workaround and retry.
6617 	 */
6618 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6619 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6620 		wm_set_mdio_slow_mode_hv(sc);
6621 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6622 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6623 	}
6624 
6625 	/*
6626 	 * (For ICH8 variants)
6627 	 * If PHY detection failed, use BM's r/w function and retry.
6628 	 */
6629 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6630 		/* if failed, retry with *_bm_* */
6631 		mii->mii_readreg = wm_gmii_bm_readreg;
6632 		mii->mii_writereg = wm_gmii_bm_writereg;
6633 
6634 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6635 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6636 	}
6637 
6638 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
6640 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6641 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6642 		sc->sc_phytype = WMPHY_NONE;
6643 	} else {
6644 		/*
6645 		 * PHY Found!
6646 		 * Check PHY type.
6647 		 */
6648 		uint32_t model;
6649 		struct mii_softc *child;
6650 
6651 		child = LIST_FIRST(&mii->mii_phys);
6652 		if (device_is_a(child->mii_dev, "igphy")) {
6653 			struct igphy_softc *isc = (struct igphy_softc *)child;
6654 
6655 			model = isc->sc_mii.mii_mpd_model;
6656 			if (model == MII_MODEL_yyINTEL_I82566)
6657 				sc->sc_phytype = WMPHY_IGP_3;
6658 		}
6659 
6660 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6661 	}
6662 }
6663 
6664 /*
6665  * wm_gmii_mediastatus:	[ifmedia interface function]
6666  *
6667  *	Get the current interface media status on a 1000BASE-T device.
6668  */
6669 static void
6670 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6671 {
6672 	struct wm_softc *sc = ifp->if_softc;
6673 
6674 	ether_mediastatus(ifp, ifmr);
6675 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6676 	    | sc->sc_flowflags;
6677 }
6678 
6679 /*
6680  * wm_gmii_mediachange:	[ifmedia interface function]
6681  *
6682  *	Set hardware to newly-selected media on a 1000BASE-T device.
6683  */
6684 static int
6685 wm_gmii_mediachange(struct ifnet *ifp)
6686 {
6687 	struct wm_softc *sc = ifp->if_softc;
6688 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6689 	int rc;
6690 
6691 	if ((ifp->if_flags & IFF_UP) == 0)
6692 		return 0;
6693 
6694 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6695 	sc->sc_ctrl |= CTRL_SLU;
6696 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6697 	    || (sc->sc_type > WM_T_82543)) {
6698 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6699 	} else {
6700 		sc->sc_ctrl &= ~CTRL_ASDE;
6701 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6702 		if (ife->ifm_media & IFM_FDX)
6703 			sc->sc_ctrl |= CTRL_FD;
6704 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6705 		case IFM_10_T:
6706 			sc->sc_ctrl |= CTRL_SPEED_10;
6707 			break;
6708 		case IFM_100_TX:
6709 			sc->sc_ctrl |= CTRL_SPEED_100;
6710 			break;
6711 		case IFM_1000_T:
6712 			sc->sc_ctrl |= CTRL_SPEED_1000;
6713 			break;
6714 		default:
6715 			panic("wm_gmii_mediachange: bad media 0x%x",
6716 			    ife->ifm_media);
6717 		}
6718 	}
6719 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6720 	if (sc->sc_type <= WM_T_82543)
6721 		wm_gmii_reset(sc);
6722 
6723 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6724 		return 0;
6725 	return rc;
6726 }
6727 
6728 #define	MDI_IO		CTRL_SWDPIN(2)
6729 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6730 #define	MDI_CLK		CTRL_SWDPIN(3)
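
/*
 * On the 82543 the MII management interface is bit-banged through the
 * software-definable pins: MDI_IO carries the data, MDI_CLK the clock,
 * and MDI_DIR switches the data pin direction (host -> PHY).
 */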
6731 
6732 static void
6733 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6734 {
6735 	uint32_t i, v;
6736 
6737 	v = CSR_READ(sc, WMREG_CTRL);
6738 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6739 	v |= MDI_DIR | CTRL_SWDPIO(3);
6740 
6741 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6742 		if (data & i)
6743 			v |= MDI_IO;
6744 		else
6745 			v &= ~MDI_IO;
6746 		CSR_WRITE(sc, WMREG_CTRL, v);
6747 		delay(10);
6748 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6749 		delay(10);
6750 		CSR_WRITE(sc, WMREG_CTRL, v);
6751 		delay(10);
6752 	}
6753 }
6754 
6755 static uint32_t
6756 i82543_mii_recvbits(struct wm_softc *sc)
6757 {
6758 	uint32_t v, i, data = 0;
6759 
6760 	v = CSR_READ(sc, WMREG_CTRL);
6761 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6762 	v |= CTRL_SWDPIO(3);
6763 
6764 	CSR_WRITE(sc, WMREG_CTRL, v);
6765 	delay(10);
6766 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6767 	delay(10);
6768 	CSR_WRITE(sc, WMREG_CTRL, v);
6769 	delay(10);
6770 
6771 	for (i = 0; i < 16; i++) {
6772 		data <<= 1;
6773 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6774 		delay(10);
6775 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6776 			data |= 1;
6777 		CSR_WRITE(sc, WMREG_CTRL, v);
6778 		delay(10);
6779 	}
6780 
6781 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6782 	delay(10);
6783 	CSR_WRITE(sc, WMREG_CTRL, v);
6784 	delay(10);
6785 
6786 	return data;
6787 }
6788 
6789 #undef MDI_IO
6790 #undef MDI_DIR
6791 #undef MDI_CLK
6792 
6793 /*
6794  * wm_gmii_i82543_readreg:	[mii interface function]
6795  *
6796  *	Read a PHY register on the GMII (i82543 version).
6797  */
6798 static int
6799 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6800 {
6801 	struct wm_softc *sc = device_private(self);
6802 	int rv;
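
	/*
	 * A read frame is clocked out by hand: a 32-bit preamble of ones,
	 * then 14 bits carrying the start and read opcodes, the PHY
	 * address and the register number; the PHY answers with 16 data
	 * bits.
	 */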
6803 
6804 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6805 	i82543_mii_sendbits(sc, reg | (phy << 5) |
6806 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6807 	rv = i82543_mii_recvbits(sc) & 0xffff;
6808 
6809 	DPRINTF(WM_DEBUG_GMII,
6810 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6811 	    device_xname(sc->sc_dev), phy, reg, rv));
6812 
6813 	return rv;
6814 }
6815 
6816 /*
6817  * wm_gmii_i82543_writereg:	[mii interface function]
6818  *
6819  *	Write a PHY register on the GMII (i82543 version).
6820  */
6821 static void
6822 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6823 {
6824 	struct wm_softc *sc = device_private(self);
6825 
6826 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6827 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6828 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6829 	    (MII_COMMAND_START << 30), 32);
6830 }
6831 
6832 /*
6833  * wm_gmii_i82544_readreg:	[mii interface function]
6834  *
6835  *	Read a PHY register on the GMII.
6836  */
6837 static int
6838 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6839 {
6840 	struct wm_softc *sc = device_private(self);
6841 	uint32_t mdic = 0;
6842 	int i, rv;
6843 
6844 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6845 	    MDIC_REGADD(reg));
6846 
6847 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
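		/*
		 * The MAC sets MDIC_READY once the management frame has
		 * been shifted out and the PHY's answer latched.
		 */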
6848 		mdic = CSR_READ(sc, WMREG_MDIC);
6849 		if (mdic & MDIC_READY)
6850 			break;
6851 		delay(50);
6852 	}
6853 
6854 	if ((mdic & MDIC_READY) == 0) {
6855 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6856 		    device_xname(sc->sc_dev), phy, reg);
6857 		rv = 0;
6858 	} else if (mdic & MDIC_E) {
6859 #if 0 /* This is normal if no PHY is present. */
6860 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6861 		    device_xname(sc->sc_dev), phy, reg);
6862 #endif
6863 		rv = 0;
6864 	} else {
6865 		rv = MDIC_DATA(mdic);
6866 		if (rv == 0xffff)
6867 			rv = 0;
6868 	}
6869 
6870 	return rv;
6871 }
6872 
6873 /*
6874  * wm_gmii_i82544_writereg:	[mii interface function]
6875  *
6876  *	Write a PHY register on the GMII.
6877  */
6878 static void
6879 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6880 {
6881 	struct wm_softc *sc = device_private(self);
6882 	uint32_t mdic = 0;
6883 	int i;
6884 
6885 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6886 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6887 
6888 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6889 		mdic = CSR_READ(sc, WMREG_MDIC);
6890 		if (mdic & MDIC_READY)
6891 			break;
6892 		delay(50);
6893 	}
6894 
6895 	if ((mdic & MDIC_READY) == 0)
6896 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6897 		    device_xname(sc->sc_dev), phy, reg);
6898 	else if (mdic & MDIC_E)
6899 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6900 		    device_xname(sc->sc_dev), phy, reg);
6901 }
6902 
6903 /*
6904  * wm_gmii_i80003_readreg:	[mii interface function]
6905  *
6906  *	Read a PHY register on the kumeran
6907  * This could be handled by the PHY layer if we didn't have to lock the
6908  * ressource ...
6909  */
6910 static int
6911 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6912 {
6913 	struct wm_softc *sc = device_private(self);
6914 	int sem;
6915 	int rv;
6916 
6917 	if (phy != 1) /* only one PHY on kumeran bus */
6918 		return 0;
6919 
6920 	sem = swfwphysem[sc->sc_funcid];
6921 	if (wm_get_swfw_semaphore(sc, sem)) {
6922 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6923 		    __func__);
6924 		return 0;
6925 	}
6926 
6927 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6928 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6929 		    reg >> GG82563_PAGE_SHIFT);
6930 	} else {
6931 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6932 		    reg >> GG82563_PAGE_SHIFT);
6933 	}
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
6935 	delay(200);
6936 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6937 	delay(200);
6938 
6939 	wm_put_swfw_semaphore(sc, sem);
6940 	return rv;
6941 }
6942 
6943 /*
6944  * wm_gmii_i80003_writereg:	[mii interface function]
6945  *
6946  *	Write a PHY register on the kumeran.
6947  * This could be handled by the PHY layer if we didn't have to lock the
6948  * ressource ...
6949  */
6950 static void
6951 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6952 {
6953 	struct wm_softc *sc = device_private(self);
6954 	int sem;
6955 
6956 	if (phy != 1) /* only one PHY on kumeran bus */
6957 		return;
6958 
6959 	sem = swfwphysem[sc->sc_funcid];
6960 	if (wm_get_swfw_semaphore(sc, sem)) {
6961 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6962 		    __func__);
6963 		return;
6964 	}
6965 
6966 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6967 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6968 		    reg >> GG82563_PAGE_SHIFT);
6969 	} else {
6970 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6971 		    reg >> GG82563_PAGE_SHIFT);
6972 	}
	/* Wait an extra 200us to work around a bug in the MDIC ready bit */
6974 	delay(200);
6975 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
6976 	delay(200);
6977 
6978 	wm_put_swfw_semaphore(sc, sem);
6979 }
6980 
6981 /*
6982  * wm_gmii_bm_readreg:	[mii interface function]
6983  *
6984  *	Read a PHY register on the kumeran
6985  * This could be handled by the PHY layer if we didn't have to lock the
6986  * ressource ...
6987  */
6988 static int
6989 wm_gmii_bm_readreg(device_t self, int phy, int reg)
6990 {
6991 	struct wm_softc *sc = device_private(self);
6992 	int sem;
6993 	int rv;
6994 
6995 	sem = swfwphysem[sc->sc_funcid];
6996 	if (wm_get_swfw_semaphore(sc, sem)) {
6997 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6998 		    __func__);
6999 		return 0;
7000 	}
7001 
7002 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7003 		if (phy == 1)
7004 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7005 			    reg);
7006 		else
7007 			wm_gmii_i82544_writereg(self, phy,
7008 			    GG82563_PHY_PAGE_SELECT,
7009 			    reg >> GG82563_PAGE_SHIFT);
7010 	}
7011 
7012 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7013 	wm_put_swfw_semaphore(sc, sem);
7014 	return rv;
7015 }
7016 
7017 /*
7018  * wm_gmii_bm_writereg:	[mii interface function]
7019  *
7020  *	Write a PHY register on the kumeran.
7021  * This could be handled by the PHY layer if we didn't have to lock the
7022  * ressource ...
7023  */
7024 static void
7025 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7026 {
7027 	struct wm_softc *sc = device_private(self);
7028 	int sem;
7029 
7030 	sem = swfwphysem[sc->sc_funcid];
7031 	if (wm_get_swfw_semaphore(sc, sem)) {
7032 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7033 		    __func__);
7034 		return;
7035 	}
7036 
7037 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7038 		if (phy == 1)
7039 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7040 			    reg);
7041 		else
7042 			wm_gmii_i82544_writereg(self, phy,
7043 			    GG82563_PHY_PAGE_SELECT,
7044 			    reg >> GG82563_PAGE_SHIFT);
7045 	}
7046 
7047 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7048 	wm_put_swfw_semaphore(sc, sem);
7049 }
7050 
7051 static void
7052 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7053 {
7054 	struct wm_softc *sc = device_private(self);
7055 	uint16_t regnum = BM_PHY_REG_NUM(offset);
7056 	uint16_t wuce;
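
	/*
	 * Access to the wakeup registers follows a fixed sequence: set the
	 * enable bit on page 769 (BM_WUC_ENABLE_REG), switch to page 800,
	 * write the register number to the address opcode register,
	 * transfer the data through the data opcode register, and finally
	 * restore the page-769 enable bits.
	 */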
7057 
7058 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
7059 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
7061 	}
7062 
7063 	/* Set page 769 */
7064 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7065 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7066 
7067 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7068 
7069 	wuce &= ~BM_WUC_HOST_WU_BIT;
7070 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7071 	    wuce | BM_WUC_ENABLE_BIT);
7072 
7073 	/* Select page 800 */
7074 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7075 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7076 
7077 	/* Write page 800 */
7078 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7079 
7080 	if (rd)
7081 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7082 	else
7083 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7084 
7085 	/* Set page 769 */
7086 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7087 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7088 
7089 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7090 }
7091 
7092 /*
7093  * wm_gmii_hv_readreg:	[mii interface function]
7094  *
7095  *	Read a PHY register on the kumeran
7096  * This could be handled by the PHY layer if we didn't have to lock the
7097  * ressource ...
7098  */
7099 static int
7100 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7101 {
7102 	struct wm_softc *sc = device_private(self);
7103 	uint16_t page = BM_PHY_REG_PAGE(reg);
7104 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7105 	uint16_t val;
7106 	int rv;
7107 
7108 	if (wm_get_swfwhw_semaphore(sc)) {
7109 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7110 		    __func__);
7111 		return 0;
7112 	}
7113 
7114 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7115 	if (sc->sc_phytype == WMPHY_82577) {
7116 		/* XXX must write */
7117 	}
7118 
7119 	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
		return val;
	}
7124 
	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
7133 
7134 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7135 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7136 		    page << BME1000_PAGE_SHIFT);
7137 	}
7138 
7139 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7140 	wm_put_swfwhw_semaphore(sc);
7141 	return rv;
7142 }
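
/*
 * Note: the "reg" argument of the HV/BM access functions encodes both a
 * page and a register number; BM_PHY_REG_PAGE() and BM_PHY_REG_NUM()
 * unpack it.  A value decoding to page 800 (BM_WUC_PAGE) is routed to
 * wm_access_phy_wakeup_reg_bm() instead of the normal page-select MDIO
 * sequence.
 */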
7143 
7144 /*
7145  * wm_gmii_hv_writereg:	[mii interface function]
7146  *
7147  *	Write a PHY register on the kumeran.
7148  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7150  */
7151 static void
7152 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7153 {
7154 	struct wm_softc *sc = device_private(self);
7155 	uint16_t page = BM_PHY_REG_PAGE(reg);
7156 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7157 
7158 	if (wm_get_swfwhw_semaphore(sc)) {
7159 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7160 		    __func__);
7161 		return;
7162 	}
7163 
7164 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7165 
7166 	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		uint16_t tmp;

		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
		return;
	}
7174 
	/*
	 * Pages lower than 768 work differently than the rest, so they
	 * would need their own function (not implemented yet).
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_writereg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return;
	}
7183 
7184 	/*
7185 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
7186 	 * Power Down (whenever bit 11 of the PHY control register is set)
7187 	 */
7188 
7189 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7190 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7191 		    page << BME1000_PAGE_SHIFT);
7192 	}
7193 
7194 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7195 	wm_put_swfwhw_semaphore(sc);
7196 }
7197 
7198 /*
7199  * wm_sgmii_uses_mdio
7200  *
7201  * Check whether the transaction is to the internal PHY or the external
7202  * MDIO interface. Return true if it's MDIO.
7203  */
7204 static bool
7205 wm_sgmii_uses_mdio(struct wm_softc *sc)
7206 {
7207 	uint32_t reg;
7208 	bool ismdio = false;
7209 
7210 	switch (sc->sc_type) {
7211 	case WM_T_82575:
7212 	case WM_T_82576:
7213 		reg = CSR_READ(sc, WMREG_MDIC);
7214 		ismdio = ((reg & MDIC_DEST) != 0);
7215 		break;
7216 	case WM_T_82580:
7217 	case WM_T_82580ER:
7218 	case WM_T_I350:
7219 	case WM_T_I354:
7220 	case WM_T_I210:
7221 	case WM_T_I211:
7222 		reg = CSR_READ(sc, WMREG_MDICNFG);
7223 		ismdio = ((reg & MDICNFG_DEST) != 0);
7224 		break;
7225 	default:
7226 		break;
7227 	}
7228 
7229 	return ismdio;
7230 }
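
/*
 * Usage sketch (illustrative only): the check above lets a caller pick
 * between the MDIO-based i82544 path and the I2CCMD-based SGMII path
 * when accessing an external PHY.
 */
#if 0
	if (wm_sgmii_uses_mdio(sc))
		val = wm_gmii_i82544_readreg(sc->sc_dev, phy, reg);
	else
		val = wm_sgmii_readreg(sc->sc_dev, phy, reg);
#endif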
7231 
7232 /*
7233  * wm_sgmii_readreg:	[mii interface function]
7234  *
7235  *	Read a PHY register on the SGMII
7236  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7238  */
7239 static int
7240 wm_sgmii_readreg(device_t self, int phy, int reg)
7241 {
7242 	struct wm_softc *sc = device_private(self);
7243 	uint32_t i2ccmd;
7244 	int i, rv;
7245 
7246 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7247 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7248 		    __func__);
7249 		return 0;
7250 	}
7251 
7252 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7253 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7254 	    | I2CCMD_OPCODE_READ;
7255 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7256 
7257 	/* Poll the ready bit */
7258 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7259 		delay(50);
7260 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7261 		if (i2ccmd & I2CCMD_READY)
7262 			break;
7263 	}
7264 	if ((i2ccmd & I2CCMD_READY) == 0)
7265 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7266 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7267 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7268 
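	/*
	 * The I2CCMD data field comes back with its two bytes swapped, so
	 * swap them here: e.g. a raw low half of 0x1234 yields rv 0x3412.
	 */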
7269 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7270 
7271 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7272 	return rv;
7273 }
7274 
7275 /*
7276  * wm_sgmii_writereg:	[mii interface function]
7277  *
7278  *	Write a PHY register on the SGMII.
7279  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7281  */
7282 static void
7283 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7284 {
7285 	struct wm_softc *sc = device_private(self);
7286 	uint32_t i2ccmd;
7287 	int i;
7288 
7289 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7290 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7291 		    __func__);
7292 		return;
7293 	}
7294 
	/* Swap the data bytes, as on the read side, and include them */
	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
	    | I2CCMD_OPCODE_WRITE
	    | (((val >> 8) & 0x00ff) | ((val << 8) & 0xff00));
7298 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7299 
7300 	/* Poll the ready bit */
7301 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7302 		delay(50);
7303 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7304 		if (i2ccmd & I2CCMD_READY)
7305 			break;
7306 	}
7307 	if ((i2ccmd & I2CCMD_READY) == 0)
7308 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7309 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7310 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7311 
	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7313 }
7314 
7315 /*
7316  * wm_gmii_82580_readreg:	[mii interface function]
7317  *
7318  *	Read a PHY register on the 82580 and I350.
7319  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7321  */
7322 static int
7323 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7324 {
7325 	struct wm_softc *sc = device_private(self);
7326 	int sem;
7327 	int rv;
7328 
7329 	sem = swfwphysem[sc->sc_funcid];
7330 	if (wm_get_swfw_semaphore(sc, sem)) {
7331 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7332 		    __func__);
7333 		return 0;
7334 	}
7335 
7336 	rv = wm_gmii_i82544_readreg(self, phy, reg);
7337 
7338 	wm_put_swfw_semaphore(sc, sem);
7339 	return rv;
7340 }
7341 
7342 /*
7343  * wm_gmii_82580_writereg:	[mii interface function]
7344  *
7345  *	Write a PHY register on the 82580 and I350.
7346  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
7348  */
7349 static void
7350 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7351 {
7352 	struct wm_softc *sc = device_private(self);
7353 	int sem;
7354 
7355 	sem = swfwphysem[sc->sc_funcid];
7356 	if (wm_get_swfw_semaphore(sc, sem)) {
7357 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7358 		    __func__);
7359 		return;
7360 	}
7361 
7362 	wm_gmii_i82544_writereg(self, phy, reg, val);
7363 
7364 	wm_put_swfw_semaphore(sc, sem);
7365 }
7366 
7367 /*
7368  * wm_gmii_statchg:	[mii interface function]
7369  *
7370  *	Callback from MII layer when media changes.
7371  */
7372 static void
7373 wm_gmii_statchg(struct ifnet *ifp)
7374 {
7375 	struct wm_softc *sc = ifp->if_softc;
7376 	struct mii_data *mii = &sc->sc_mii;
7377 
7378 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7379 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7380 	sc->sc_fcrtl &= ~FCRTL_XONE;
7381 
7382 	/*
7383 	 * Get flow control negotiation result.
7384 	 */
7385 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7386 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7387 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7388 		mii->mii_media_active &= ~IFM_ETH_FMASK;
7389 	}
7390 
7391 	if (sc->sc_flowflags & IFM_FLOW) {
7392 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7393 			sc->sc_ctrl |= CTRL_TFCE;
7394 			sc->sc_fcrtl |= FCRTL_XONE;
7395 		}
7396 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7397 			sc->sc_ctrl |= CTRL_RFCE;
7398 	}
7399 
7400 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7401 		DPRINTF(WM_DEBUG_LINK,
7402 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7403 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7404 	} else {
7405 		DPRINTF(WM_DEBUG_LINK,
7406 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7407 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7408 	}
7409 
7410 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7411 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7412 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7413 						 : WMREG_FCRTL, sc->sc_fcrtl);
7414 	if (sc->sc_type == WM_T_80003) {
7415 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7416 		case IFM_1000_T:
7417 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7418 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
7420 			break;
7421 		default:
7422 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7423 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
7425 			break;
7426 		}
7427 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7428 	}
7429 }
7430 
7431 /*
7432  * wm_kmrn_readreg:
7433  *
7434  *	Read a kumeran register
7435  */
7436 static int
7437 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7438 {
7439 	int rv;
7440 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7442 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7443 			aprint_error_dev(sc->sc_dev,
7444 			    "%s: failed to get semaphore\n", __func__);
7445 			return 0;
7446 		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7448 		if (wm_get_swfwhw_semaphore(sc)) {
7449 			aprint_error_dev(sc->sc_dev,
7450 			    "%s: failed to get semaphore\n", __func__);
7451 			return 0;
7452 		}
7453 	}
7454 
7455 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7456 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7457 	    KUMCTRLSTA_REN);
7458 	delay(2);
7459 
7460 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7461 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7463 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7465 		wm_put_swfwhw_semaphore(sc);
7466 
7467 	return rv;
7468 }
7469 
7470 /*
7471  * wm_kmrn_writereg:
7472  *
7473  *	Write a kumeran register
7474  */
7475 static void
7476 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7477 {
7478 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0) {
7480 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7481 			aprint_error_dev(sc->sc_dev,
7482 			    "%s: failed to get semaphore\n", __func__);
7483 			return;
7484 		}
	} else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0) {
7486 		if (wm_get_swfwhw_semaphore(sc)) {
7487 			aprint_error_dev(sc->sc_dev,
7488 			    "%s: failed to get semaphore\n", __func__);
7489 			return;
7490 		}
7491 	}
7492 
7493 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7494 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7495 	    (val & KUMCTRLSTA_MASK));
7496 
	if ((sc->sc_flags & WM_F_SWFW_SYNC) != 0)
7498 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
	else if ((sc->sc_flags & WM_F_SWFWHW_SYNC) != 0)
7500 		wm_put_swfwhw_semaphore(sc);
7501 }
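
/*
 * Usage sketch (illustrative only): a typical read-modify-write of a
 * Kumeran register through the two helpers above, as done by the PCS
 * lock-loss workarounds further below.
 */
#if 0
	reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
	reg |= KUMCTRLSTA_DIAG_NELPBK;
	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, reg);
#endif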
7502 
7503 static int
7504 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7505 {
7506 	uint32_t eecd = 0;
7507 
7508 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7509 	    || sc->sc_type == WM_T_82583) {
7510 		eecd = CSR_READ(sc, WMREG_EECD);
7511 
7512 		/* Isolate bits 15 & 16 */
7513 		eecd = ((eecd >> 15) & 0x03);
7514 
7515 		/* If both bits are set, device is Flash type */
7516 		if (eecd == 0x03)
7517 			return 0;
7518 	}
7519 	return 1;
7520 }
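
/*
 * Worked example for the check above: with EECD bits 15 and 16 both set,
 * ((eecd >> 15) & 0x03) == 0x03, so the NVM is Flash and the function
 * returns 0; any other combination is treated as on-board EEPROM.
 */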
7521 
7522 static int
7523 wm_get_swsm_semaphore(struct wm_softc *sc)
7524 {
7525 	int32_t timeout;
7526 	uint32_t swsm;
7527 
7528 	/* Get the FW semaphore. */
7529 	timeout = 1000 + 1; /* XXX */
7530 	while (timeout) {
7531 		swsm = CSR_READ(sc, WMREG_SWSM);
7532 		swsm |= SWSM_SWESMBI;
7533 		CSR_WRITE(sc, WMREG_SWSM, swsm);
7534 		/* if we managed to set the bit we got the semaphore. */
7535 		swsm = CSR_READ(sc, WMREG_SWSM);
7536 		if (swsm & SWSM_SWESMBI)
7537 			break;
7538 
7539 		delay(50);
7540 		timeout--;
7541 	}
7542 
7543 	if (timeout == 0) {
7544 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7545 		/* Release semaphores */
7546 		wm_put_swsm_semaphore(sc);
7547 		return 1;
7548 	}
7549 	return 0;
7550 }
7551 
7552 static void
7553 wm_put_swsm_semaphore(struct wm_softc *sc)
7554 {
7555 	uint32_t swsm;
7556 
7557 	swsm = CSR_READ(sc, WMREG_SWSM);
7558 	swsm &= ~(SWSM_SWESMBI);
7559 	CSR_WRITE(sc, WMREG_SWSM, swsm);
7560 }
7561 
7562 static int
7563 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7564 {
7565 	uint32_t swfw_sync;
7566 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7567 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
7569 
7570 	for (timeout = 0; timeout < 200; timeout++) {
7571 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7572 			if (wm_get_swsm_semaphore(sc)) {
7573 				aprint_error_dev(sc->sc_dev,
7574 				    "%s: failed to get semaphore\n",
7575 				    __func__);
7576 				return 1;
7577 			}
7578 		}
7579 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7580 		if ((swfw_sync & (swmask | fwmask)) == 0) {
7581 			swfw_sync |= swmask;
7582 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7583 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7584 				wm_put_swsm_semaphore(sc);
7585 			return 0;
7586 		}
7587 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7588 			wm_put_swsm_semaphore(sc);
7589 		delay(5000);
7590 	}
7591 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7592 	    device_xname(sc->sc_dev), mask, swfw_sync);
7593 	return 1;
7594 }
7595 
7596 static void
7597 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7598 {
7599 	uint32_t swfw_sync;
7600 
7601 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7602 		while (wm_get_swsm_semaphore(sc) != 0)
7603 			continue;
7604 	}
7605 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7606 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7607 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7608 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7609 		wm_put_swsm_semaphore(sc);
7610 }
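
/*
 * Usage sketch (illustrative only): the SW/FW semaphore pair above is
 * used bracket-style around the guarded access, with the same mask
 * passed to get and put.
 */
#if 0
	uint16_t sem = swfwphysem[sc->sc_funcid];

	if (wm_get_swfw_semaphore(sc, sem))
		return 0;	/* hardware/firmware is busy */
	/* ... access the PHY or NVM resource guarded by "sem" ... */
	wm_put_swfw_semaphore(sc, sem);
#endif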
7611 
7612 static int
7613 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7614 {
7615 	uint32_t ext_ctrl;
	int timeout;
7617 
7618 	for (timeout = 0; timeout < 200; timeout++) {
7619 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7620 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7621 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7622 
7623 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7624 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7625 			return 0;
7626 		delay(5000);
7627 	}
7628 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7629 	    device_xname(sc->sc_dev), ext_ctrl);
7630 	return 1;
7631 }
7632 
7633 static void
7634 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7635 {
7636 	uint32_t ext_ctrl;
7637 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7638 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7639 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7640 }
7641 
7642 static int
7643 wm_get_hw_semaphore_82573(struct wm_softc *sc)
7644 {
7645 	int i = 0;
7646 	uint32_t reg;
7647 
7648 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7649 	do {
7650 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
7651 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7652 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7653 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7654 			break;
7655 		delay(2*1000);
7656 		i++;
7657 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7658 
7659 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7660 		wm_put_hw_semaphore_82573(sc);
7661 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
7662 		    device_xname(sc->sc_dev));
7663 		return -1;
7664 	}
7665 
7666 	return 0;
7667 }
7668 
7669 static void
7670 wm_put_hw_semaphore_82573(struct wm_softc *sc)
7671 {
7672 	uint32_t reg;
7673 
7674 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7675 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7676 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7677 }
7678 
7679 static int
7680 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7681 {
7682 	uint32_t eecd;
7683 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7684 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7685 	uint8_t sig_byte = 0;
7686 
7687 	switch (sc->sc_type) {
7688 	case WM_T_ICH8:
7689 	case WM_T_ICH9:
7690 		eecd = CSR_READ(sc, WMREG_EECD);
7691 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7692 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7693 			return 0;
7694 		}
7695 		/* FALLTHROUGH */
7696 	default:
7697 		/* Default to 0 */
7698 		*bank = 0;
7699 
7700 		/* Check bank 0 */
7701 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
7702 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7703 			*bank = 0;
7704 			return 0;
7705 		}
7706 
7707 		/* Check bank 1 */
7708 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
7709 		    &sig_byte);
7710 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7711 			*bank = 1;
7712 			return 0;
7713 		}
7714 	}
7715 
7716 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7717 		device_xname(sc->sc_dev)));
7718 	return -1;
7719 }
7720 
7721 /******************************************************************************
7722  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7723  * register.
7724  *
7725  * sc - Struct containing variables accessed by shared code
7726  * offset - offset of word in the EEPROM to read
7727  * data - word read from the EEPROM
7728  * words - number of words to read
7729  *****************************************************************************/
7730 static int
7731 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7732 {
7733 	int32_t  error = 0;
7734 	uint32_t flash_bank = 0;
7735 	uint32_t act_offset = 0;
7736 	uint32_t bank_offset = 0;
7737 	uint16_t word = 0;
7738 	uint16_t i = 0;
7739 
	/*
	 * We need to know which is the valid flash bank.  In the event
	 * that we didn't allocate eeprom_shadow_ram, we may not be
	 * managing flash_bank.  So it cannot be trusted and needs
	 * to be updated with each read.
	 */
7745 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7746 	if (error) {
7747 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7748 		    __func__);
7749 		flash_bank = 0;
7750 	}
7751 
7752 	/*
7753 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
7754 	 * size
7755 	 */
7756 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
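
	/*
	 * Worked example: with a bank size of 0x800 words, bank 1 starts at
	 * byte offset 0x1000, so reading word 3 of bank 1 accesses flash
	 * bytes 0x1006-0x1007 in the loop below.
	 */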
7757 
7758 	error = wm_get_swfwhw_semaphore(sc);
7759 	if (error) {
7760 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7761 		    __func__);
7762 		return error;
7763 	}
7764 
7765 	for (i = 0; i < words; i++) {
7766 		/* The NVM part needs a byte offset, hence * 2 */
7767 		act_offset = bank_offset + ((offset + i) * 2);
7768 		error = wm_read_ich8_word(sc, act_offset, &word);
7769 		if (error) {
7770 			aprint_error_dev(sc->sc_dev,
7771 			    "%s: failed to read NVM\n", __func__);
7772 			break;
7773 		}
7774 		data[i] = word;
7775 	}
7776 
7777 	wm_put_swfwhw_semaphore(sc);
7778 	return error;
7779 }
7780 
7781 /******************************************************************************
7782  * This function does initial flash setup so that a new read/write/erase cycle
7783  * can be started.
7784  *
7785  * sc - The pointer to the hw structure
7786  ****************************************************************************/
7787 static int32_t
7788 wm_ich8_cycle_init(struct wm_softc *sc)
7789 {
7790 	uint16_t hsfsts;
7791 	int32_t error = 1;
7792 	int32_t i     = 0;
7793 
7794 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7795 
	/* Maybe check the Flash Descriptor Valid bit in HW status */
7797 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7798 		return error;
7799 	}
7800 
	/* Clear FCERR and DAEL in HW status by writing 1s */
7803 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7804 
7805 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7806 
7807 	/*
	 * Either we should have a hardware SPI cycle-in-progress bit to
	 * check against in order to start a new cycle, or the FDONE bit
	 * should be set by hardware reset so that it reads as 1 afterwards
	 * and can indicate whether a cycle is in progress or has completed.
	 * We should also have a software semaphore mechanism to guard FDONE
	 * or the cycle-in-progress bit, so that accesses by two threads are
	 * serialized and two threads don't start a cycle at the same time.
7816 	 */
7817 
7818 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7819 		/*
7820 		 * There is no cycle running at present, so we can start a
7821 		 * cycle
7822 		 */
7823 
7824 		/* Begin by setting Flash Cycle Done. */
7825 		hsfsts |= HSFSTS_DONE;
7826 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7827 		error = 0;
7828 	} else {
7829 		/*
7830 		 * otherwise poll for sometime so the current cycle has a
7831 		 * chance to end before giving up.
7832 		 */
7833 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7834 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7835 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7836 				error = 0;
7837 				break;
7838 			}
7839 			delay(1);
7840 		}
7841 		if (error == 0) {
7842 			/*
			 * Successfully waited for the previous cycle to
			 * finish, now set the Flash Cycle Done.
7845 			 */
7846 			hsfsts |= HSFSTS_DONE;
7847 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7848 		}
7849 	}
7850 	return error;
7851 }
7852 
7853 /******************************************************************************
7854  * This function starts a flash cycle and waits for its completion
7855  *
7856  * sc - The pointer to the hw structure
7857  ****************************************************************************/
7858 static int32_t
7859 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7860 {
7861 	uint16_t hsflctl;
7862 	uint16_t hsfsts;
7863 	int32_t error = 1;
7864 	uint32_t i = 0;
7865 
7866 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7867 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7868 	hsflctl |= HSFCTL_GO;
7869 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7870 
7871 	/* wait till FDONE bit is set to 1 */
7872 	do {
7873 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7874 		if (hsfsts & HSFSTS_DONE)
7875 			break;
7876 		delay(1);
7877 		i++;
7878 	} while (i < timeout);
	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
7880 		error = 0;
7881 
7882 	return error;
7883 }
7884 
7885 /******************************************************************************
7886  * Reads a byte or word from the NVM using the ICH8 flash access registers.
7887  *
7888  * sc - The pointer to the hw structure
7889  * index - The index of the byte or word to read.
7890  * size - Size of data to read, 1=byte 2=word
7891  * data - Pointer to the word to store the value read.
7892  *****************************************************************************/
7893 static int32_t
7894 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7895     uint32_t size, uint16_t* data)
7896 {
7897 	uint16_t hsfsts;
7898 	uint16_t hsflctl;
7899 	uint32_t flash_linear_address;
7900 	uint32_t flash_data = 0;
7901 	int32_t error = 1;
7902 	int32_t count = 0;
7903 
	if (size < 1 || size > 2 || data == NULL ||
7905 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7906 		return error;
7907 
7908 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7909 	    sc->sc_ich8_flash_base;
7910 
7911 	do {
7912 		delay(1);
7913 		/* Steps */
7914 		error = wm_ich8_cycle_init(sc);
7915 		if (error)
7916 			break;
7917 
7918 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7919 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7920 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7921 		    & HSFCTL_BCOUNT_MASK;
7922 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7923 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7924 
7925 		/*
7926 		 * Write the last 24 bits of index into Flash Linear address
7927 		 * field in Flash Address
7928 		 */
7929 		/* TODO: TBD maybe check the index against the size of flash */
7930 
7931 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7932 
7933 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7934 
7935 		/*
		 * If FCERR is set, clear it and retry the whole sequence a
		 * few more times; otherwise read the data out of Flash
		 * Data0, least significant byte first.
7940 		 */
7941 		if (error == 0) {
7942 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7943 			if (size == 1)
7944 				*data = (uint8_t)(flash_data & 0x000000FF);
7945 			else if (size == 2)
7946 				*data = (uint16_t)(flash_data & 0x0000FFFF);
7947 			break;
7948 		} else {
7949 			/*
7950 			 * If we've gotten here, then things are probably
7951 			 * completely hosed, but if the error condition is
7952 			 * detected, it won't hurt to give it another try...
7953 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7954 			 */
7955 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7956 			if (hsfsts & HSFSTS_ERR) {
7957 				/* Repeat for some time before giving up. */
7958 				continue;
7959 			} else if ((hsfsts & HSFSTS_DONE) == 0)
7960 				break;
7961 		}
7962 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
7963 
7964 	return error;
7965 }
7966 
7967 /******************************************************************************
7968  * Reads a single byte from the NVM using the ICH8 flash access registers.
7969  *
7970  * sc - pointer to wm_hw structure
7971  * index - The index of the byte to read.
7972  * data - Pointer to a byte to store the value read.
7973  *****************************************************************************/
7974 static int32_t
7975 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
7976 {
7977 	int32_t status;
7978 	uint16_t word = 0;
7979 
7980 	status = wm_read_ich8_data(sc, index, 1, &word);
7981 	if (status == 0)
7982 		*data = (uint8_t)word;
7983 	else
7984 		*data = 0;
7985 
7986 	return status;
7987 }
7988 
7989 /******************************************************************************
7990  * Reads a word from the NVM using the ICH8 flash access registers.
7991  *
7992  * sc - pointer to wm_hw structure
7993  * index - The starting byte index of the word to read.
7994  * data - Pointer to a word to store the value read.
7995  *****************************************************************************/
7996 static int32_t
7997 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
7998 {
7999 	int32_t status;
8000 
8001 	status = wm_read_ich8_data(sc, index, 2, data);
8002 	return status;
8003 }
8004 
8005 static int
8006 wm_check_mng_mode(struct wm_softc *sc)
8007 {
8008 	int rv;
8009 
8010 	switch (sc->sc_type) {
8011 	case WM_T_ICH8:
8012 	case WM_T_ICH9:
8013 	case WM_T_ICH10:
8014 	case WM_T_PCH:
8015 	case WM_T_PCH2:
8016 	case WM_T_PCH_LPT:
8017 		rv = wm_check_mng_mode_ich8lan(sc);
8018 		break;
8019 	case WM_T_82574:
8020 	case WM_T_82583:
8021 		rv = wm_check_mng_mode_82574(sc);
8022 		break;
8023 	case WM_T_82571:
8024 	case WM_T_82572:
8025 	case WM_T_82573:
8026 	case WM_T_80003:
8027 		rv = wm_check_mng_mode_generic(sc);
8028 		break;
8029 	default:
		/* nothing to do */
8031 		rv = 0;
8032 		break;
8033 	}
8034 
8035 	return rv;
8036 }
8037 
8038 static int
8039 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8040 {
8041 	uint32_t fwsm;
8042 
8043 	fwsm = CSR_READ(sc, WMREG_FWSM);
8044 
8045 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8046 		return 1;
8047 
8048 	return 0;
8049 }
8050 
8051 static int
8052 wm_check_mng_mode_82574(struct wm_softc *sc)
8053 {
8054 	uint16_t data;
8055 
8056 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8057 
8058 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8059 		return 1;
8060 
8061 	return 0;
8062 }
8063 
8064 static int
8065 wm_check_mng_mode_generic(struct wm_softc *sc)
8066 {
8067 	uint32_t fwsm;
8068 
8069 	fwsm = CSR_READ(sc, WMREG_FWSM);
8070 
8071 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8072 		return 1;
8073 
8074 	return 0;
8075 }
8076 
8077 static int
8078 wm_enable_mng_pass_thru(struct wm_softc *sc)
8079 {
8080 	uint32_t manc, fwsm, factps;
8081 
8082 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8083 		return 0;
8084 
8085 	manc = CSR_READ(sc, WMREG_MANC);
8086 
8087 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8088 		device_xname(sc->sc_dev), manc));
8089 	if ((manc & MANC_RECV_TCO_EN) == 0)
8090 		return 0;
8091 
8092 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8093 		fwsm = CSR_READ(sc, WMREG_FWSM);
8094 		factps = CSR_READ(sc, WMREG_FACTPS);
8095 		if (((factps & FACTPS_MNGCG) == 0)
8096 		    && ((fwsm & FWSM_MODE_MASK)
8097 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8098 			return 1;
	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
8100 		uint16_t data;
8101 
8102 		factps = CSR_READ(sc, WMREG_FACTPS);
8103 		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8104 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8105 			device_xname(sc->sc_dev), factps, data));
8106 		if (((factps & FACTPS_MNGCG) == 0)
8107 		    && ((data & EEPROM_CFG2_MNGM_MASK)
8108 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8109 			return 1;
8110 	} else if (((manc & MANC_SMBUS_EN) != 0)
8111 	    && ((manc & MANC_ASF_EN) == 0))
8112 		return 1;
8113 
8114 	return 0;
8115 }
8116 
8117 static int
8118 wm_check_reset_block(struct wm_softc *sc)
8119 {
8120 	uint32_t reg;
8121 
8122 	switch (sc->sc_type) {
8123 	case WM_T_ICH8:
8124 	case WM_T_ICH9:
8125 	case WM_T_ICH10:
8126 	case WM_T_PCH:
8127 	case WM_T_PCH2:
8128 	case WM_T_PCH_LPT:
8129 		reg = CSR_READ(sc, WMREG_FWSM);
8130 		if ((reg & FWSM_RSPCIPHY) != 0)
8131 			return 0;
8132 		else
8133 			return -1;
8134 		break;
8135 	case WM_T_82571:
8136 	case WM_T_82572:
8137 	case WM_T_82573:
8138 	case WM_T_82574:
8139 	case WM_T_82583:
8140 	case WM_T_80003:
8141 		reg = CSR_READ(sc, WMREG_MANC);
8142 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8143 			return -1;
8144 		else
8145 			return 0;
8146 		break;
8147 	default:
8148 		/* no problem */
8149 		break;
8150 	}
8151 
8152 	return 0;
8153 }
8154 
8155 static void
8156 wm_get_hw_control(struct wm_softc *sc)
8157 {
8158 	uint32_t reg;
8159 
8160 	switch (sc->sc_type) {
8161 	case WM_T_82573:
8162 		reg = CSR_READ(sc, WMREG_SWSM);
8163 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8164 		break;
8165 	case WM_T_82571:
8166 	case WM_T_82572:
8167 	case WM_T_82574:
8168 	case WM_T_82583:
8169 	case WM_T_80003:
8170 	case WM_T_ICH8:
8171 	case WM_T_ICH9:
8172 	case WM_T_ICH10:
8173 	case WM_T_PCH:
8174 	case WM_T_PCH2:
8175 	case WM_T_PCH_LPT:
8176 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8177 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8178 		break;
8179 	default:
8180 		break;
8181 	}
8182 }
8183 
8184 static void
8185 wm_release_hw_control(struct wm_softc *sc)
8186 {
8187 	uint32_t reg;
8188 
8189 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8190 		return;
8191 
8192 	if (sc->sc_type == WM_T_82573) {
8193 		reg = CSR_READ(sc, WMREG_SWSM);
8194 		reg &= ~SWSM_DRV_LOAD;
		CSR_WRITE(sc, WMREG_SWSM, reg);
8196 	} else {
8197 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8198 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8199 	}
8200 }
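
/*
 * Note: the DRV_LOAD handshake above tells manageability firmware whether
 * the driver owns the hardware.  wm_get_hw_control() is meant to be called
 * at init time and wm_release_hw_control() on suspend/detach, as in
 * wm_suspend() below.
 */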
8201 
8202 /* XXX Currently TBI only */
8203 static int
8204 wm_check_for_link(struct wm_softc *sc)
8205 {
8206 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8207 	uint32_t rxcw;
8208 	uint32_t ctrl;
8209 	uint32_t status;
8210 	uint32_t sig;
8211 
8212 	rxcw = CSR_READ(sc, WMREG_RXCW);
8213 	ctrl = CSR_READ(sc, WMREG_CTRL);
8214 	status = CSR_READ(sc, WMREG_STATUS);
8215 
8216 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8217 
8218 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8219 		device_xname(sc->sc_dev), __func__,
8220 		((ctrl & CTRL_SWDPIN(1)) == sig),
8221 		((status & STATUS_LU) != 0),
8222 		((rxcw & RXCW_C) != 0)
8223 		    ));
8224 
8225 	/*
8226 	 * SWDPIN   LU RXCW
8227 	 *      0    0    0
8228 	 *      0    0    1	(should not happen)
8229 	 *      0    1    0	(should not happen)
8230 	 *      0    1    1	(should not happen)
8231 	 *      1    0    0	Disable autonego and force linkup
8232 	 *      1    0    1	got /C/ but not linkup yet
8233 	 *      1    1    0	(linkup)
8234 	 *      1    1    1	If IFM_AUTO, back to autonego
8235 	 *
8236 	 */
8237 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
8238 	    && ((status & STATUS_LU) == 0)
8239 	    && ((rxcw & RXCW_C) == 0)) {
8240 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8241 			__func__));
8242 		sc->sc_tbi_linkup = 0;
8243 		/* Disable auto-negotiation in the TXCW register */
8244 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8245 
8246 		/*
8247 		 * Force link-up and also force full-duplex.
8248 		 *
		 * NOTE: the hardware updated CTRL's TFCE and RFCE bits
		 * automatically, so we should update sc->sc_ctrl too.
8251 		 */
8252 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8253 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8254 	} else if (((status & STATUS_LU) != 0)
8255 	    && ((rxcw & RXCW_C) != 0)
8256 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8257 		sc->sc_tbi_linkup = 1;
8258 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8259 			__func__));
8260 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8261 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8262 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8263 	    && ((rxcw & RXCW_C) != 0)) {
8264 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
8265 	} else {
8266 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8267 			status));
8268 	}
8269 
8270 	return 0;
8271 }
8272 
8273 /* Work-around for 82566 Kumeran PCS lock loss */
8274 static void
8275 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8276 {
8277 	int miistatus, active, i;
8278 	int reg;
8279 
8280 	miistatus = sc->sc_mii.mii_media_status;
8281 
8282 	/* If the link is not up, do nothing */
	if ((miistatus & IFM_ACTIVE) == 0)
8284 		return;
8285 
8286 	active = sc->sc_mii.mii_media_active;
8287 
8288 	/* Nothing to do if the link is other than 1Gbps */
8289 	if (IFM_SUBTYPE(active) != IFM_1000_T)
8290 		return;
8291 
8292 	for (i = 0; i < 10; i++) {
8293 		/* read twice */
8294 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8295 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
8297 			goto out;	/* GOOD! */
8298 
8299 		/* Reset the PHY */
8300 		wm_gmii_reset(sc);
8301 		delay(5*1000);
8302 	}
8303 
8304 	/* Disable GigE link negotiation */
8305 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
8306 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8307 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8308 
8309 	/*
8310 	 * Call gig speed drop workaround on Gig disable before accessing
8311 	 * any PHY registers.
8312 	 */
8313 	wm_gig_downshift_workaround_ich8lan(sc);
8314 
8315 out:
8316 	return;
8317 }
8318 
8319 /* WOL from S5 stops working */
8320 static void
8321 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8322 {
8323 	uint16_t kmrn_reg;
8324 
8325 	/* Only for igp3 */
8326 	if (sc->sc_phytype == WMPHY_IGP_3) {
8327 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8328 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8329 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8330 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8331 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8332 	}
8333 }
8334 
8335 #ifdef WM_WOL
8336 /* Power down workaround on D3 */
8337 static void
8338 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8339 {
8340 	uint32_t reg;
8341 	int i;
8342 
8343 	for (i = 0; i < 2; i++) {
8344 		/* Disable link */
8345 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8346 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8347 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8348 
8349 		/*
8350 		 * Call gig speed drop workaround on Gig disable before
8351 		 * accessing any PHY registers
8352 		 */
8353 		if (sc->sc_type == WM_T_ICH8)
8354 			wm_gig_downshift_workaround_ich8lan(sc);
8355 
8356 		/* Write VR power-down enable */
8357 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8358 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8359 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8360 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8361 
8362 		/* Read it back and test */
8363 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8364 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8365 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8366 			break;
8367 
8368 		/* Issue PHY reset and repeat at most one more time */
8369 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8370 	}
8371 }
8372 #endif /* WM_WOL */
8373 
8374 /*
8375  * Workaround for pch's PHYs
8376  * XXX should be moved to new PHY driver?
8377  */
8378 static void
8379 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8380 {
8381 	if (sc->sc_phytype == WMPHY_82577)
8382 		wm_set_mdio_slow_mode_hv(sc);
8383 
8384 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8385 
8386 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8387 
8388 	/* 82578 */
8389 	if (sc->sc_phytype == WMPHY_82578) {
8390 		/* PCH rev. < 3 */
8391 		if (sc->sc_rev < 3) {
8392 			/* XXX 6 bit shift? Why? Is it page2? */
8393 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8394 			    0x66c0);
8395 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8396 			    0xffff);
8397 		}
8398 
8399 		/* XXX phy rev. < 2 */
8400 	}
8401 
8402 	/* Select page 0 */
8403 
8404 	/* XXX acquire semaphore */
8405 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8406 	/* XXX release semaphore */
8407 
8408 	/*
8409 	 * Configure the K1 Si workaround during phy reset assuming there is
8410 	 * link so that it disables K1 if link is in 1Gbps.
8411 	 */
8412 	wm_k1_gig_workaround_hv(sc, 1);
8413 }
8414 
8415 static void
8416 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8417 {
8418 
8419 	wm_set_mdio_slow_mode_hv(sc);
8420 }
8421 
8422 static void
8423 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8424 {
8425 	int k1_enable = sc->sc_nvm_k1_enabled;
8426 
8427 	/* XXX acquire semaphore */
8428 
8429 	if (link) {
8430 		k1_enable = 0;
8431 
8432 		/* Link stall fix for link up */
8433 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8434 	} else {
8435 		/* Link stall fix for link down */
8436 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8437 	}
8438 
8439 	wm_configure_k1_ich8lan(sc, k1_enable);
8440 
8441 	/* XXX release semaphore */
8442 }
8443 
8444 static void
8445 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8446 {
8447 	uint32_t reg;
8448 
8449 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8450 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8451 	    reg | HV_KMRN_MDIO_SLOW);
8452 }
8453 
8454 static void
8455 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8456 {
8457 	uint32_t ctrl, ctrl_ext, tmp;
8458 	uint16_t kmrn_reg;
8459 
8460 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8461 
8462 	if (k1_enable)
8463 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8464 	else
8465 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8466 
8467 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8468 
8469 	delay(20);
8470 
8471 	ctrl = CSR_READ(sc, WMREG_CTRL);
8472 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8473 
8474 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8475 	tmp |= CTRL_FRCSPD;
8476 
8477 	CSR_WRITE(sc, WMREG_CTRL, tmp);
8478 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8479 	delay(20);
8480 
8481 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
8482 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8483 	delay(20);
8484 }
8485 
8486 static void
8487 wm_smbustopci(struct wm_softc *sc)
8488 {
8489 	uint32_t fwsm;
8490 
8491 	fwsm = CSR_READ(sc, WMREG_FWSM);
8492 	if (((fwsm & FWSM_FW_VALID) == 0)
8493 	    && ((wm_check_reset_block(sc) == 0))) {
8494 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8495 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8496 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8497 		delay(10);
8498 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8499 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8500 		delay(50*1000);
8501 
8502 		/*
8503 		 * Gate automatic PHY configuration by hardware on non-managed
8504 		 * 82579
8505 		 */
8506 		if (sc->sc_type == WM_T_PCH2)
8507 			wm_gate_hw_phy_config_ich8lan(sc, 1);
8508 	}
8509 }
8510 
8511 static void
8512 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8513 {
8514 	uint32_t gcr;
8515 	pcireg_t ctrl2;
8516 
8517 	gcr = CSR_READ(sc, WMREG_GCR);
8518 
8519 	/* Only take action if timeout value is defaulted to 0 */
8520 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8521 		goto out;
8522 
8523 	if ((gcr & GCR_CAP_VER2) == 0) {
8524 		gcr |= GCR_CMPL_TMOUT_10MS;
8525 		goto out;
8526 	}
8527 
8528 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8529 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
8530 	ctrl2 |= WM_PCIE_DCSR2_16MS;
8531 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8532 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8533 
8534 out:
8535 	/* Disable completion timeout resend */
8536 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
8537 
8538 	CSR_WRITE(sc, WMREG_GCR, gcr);
8539 }
8540 
8541 /* special case - for 82575 - need to do manual init ... */
8542 static void
8543 wm_reset_init_script_82575(struct wm_softc *sc)
8544 {
8545 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
8548 	 */
8549 
8550 	/* SerDes configuration via SERDESCTRL */
8551 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8552 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8553 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8554 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8555 
8556 	/* CCM configuration via CCMCTL register */
8557 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8558 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8559 
8560 	/* PCIe lanes configuration */
8561 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8562 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8563 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8564 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8565 
8566 	/* PCIe PLL Configuration */
8567 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8568 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8569 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8570 }
8571 
8572 static void
8573 wm_init_manageability(struct wm_softc *sc)
8574 {
8575 
8576 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8577 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8578 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8579 
		/* Disable hardware interception of ARP */
8581 		manc &= ~MANC_ARP_EN;
8582 
8583 		/* enable receiving management packets to the host */
8584 		if (sc->sc_type >= WM_T_82571) {
8585 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
		}
8590 
8591 		CSR_WRITE(sc, WMREG_MANC, manc);
8592 	}
8593 }
8594 
8595 static void
8596 wm_release_manageability(struct wm_softc *sc)
8597 {
8598 
8599 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8600 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8601 
8602 		manc |= MANC_ARP_EN;
8603 		if (sc->sc_type >= WM_T_82571)
8604 			manc &= ~MANC_EN_MNG2HOST;
8605 
8606 		CSR_WRITE(sc, WMREG_MANC, manc);
8607 	}
8608 }
8609 
8610 static void
8611 wm_get_wakeup(struct wm_softc *sc)
8612 {
8613 
8614 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8615 	switch (sc->sc_type) {
8616 	case WM_T_82573:
8617 	case WM_T_82583:
8618 		sc->sc_flags |= WM_F_HAS_AMT;
8619 		/* FALLTHROUGH */
8620 	case WM_T_80003:
8621 	case WM_T_82541:
8622 	case WM_T_82547:
8623 	case WM_T_82571:
8624 	case WM_T_82572:
8625 	case WM_T_82574:
8626 	case WM_T_82575:
8627 	case WM_T_82576:
8628 	case WM_T_82580:
8629 	case WM_T_82580ER:
8630 	case WM_T_I350:
8631 	case WM_T_I354:
8632 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8633 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8634 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8635 		break;
8636 	case WM_T_ICH8:
8637 	case WM_T_ICH9:
8638 	case WM_T_ICH10:
8639 	case WM_T_PCH:
8640 	case WM_T_PCH2:
8641 	case WM_T_PCH_LPT:
8642 		sc->sc_flags |= WM_F_HAS_AMT;
8643 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8644 		break;
8645 	default:
8646 		break;
8647 	}
8648 
8649 	/* 1: HAS_MANAGE */
8650 	if (wm_enable_mng_pass_thru(sc) != 0)
8651 		sc->sc_flags |= WM_F_HAS_MANAGE;
8652 
8653 #ifdef WM_DEBUG
8654 	printf("\n");
8655 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8656 		printf("HAS_AMT,");
8657 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8658 		printf("ARC_SUBSYS_VALID,");
8659 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8660 		printf("ASF_FIRMWARE_PRES,");
8661 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8662 		printf("HAS_MANAGE,");
8663 	printf("\n");
8664 #endif
8665 	/*
	 * Note that the WOL flags are set after the resetting of the EEPROM
	 * stuff.
8668 	 */
8669 }
8670 
8671 #ifdef WM_WOL
8672 /* WOL in the newer chipset interfaces (pchlan) */
8673 static void
8674 wm_enable_phy_wakeup(struct wm_softc *sc)
8675 {
8676 #if 0
8677 	uint16_t preg;
8678 
8679 	/* Copy MAC RARs to PHY RARs */
8680 
8681 	/* Copy MAC MTA to PHY MTA */
8682 
8683 	/* Configure PHY Rx Control register */
8684 
8685 	/* Enable PHY wakeup in MAC register */
8686 
8687 	/* Configure and enable PHY wakeup in PHY registers */
8688 
8689 	/* Activate PHY wakeup */
8690 
8691 	/* XXX */
8692 #endif
8693 }
8694 
8695 static void
8696 wm_enable_wakeup(struct wm_softc *sc)
8697 {
8698 	uint32_t reg, pmreg;
8699 	pcireg_t pmode;
8700 
8701 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8702 		&pmreg, NULL) == 0)
8703 		return;
8704 
8705 	/* Advertise the wakeup capability */
8706 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8707 	    | CTRL_SWDPIN(3));
8708 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8709 
8710 	/* ICH workaround */
8711 	switch (sc->sc_type) {
8712 	case WM_T_ICH8:
8713 	case WM_T_ICH9:
8714 	case WM_T_ICH10:
8715 	case WM_T_PCH:
8716 	case WM_T_PCH2:
8717 	case WM_T_PCH_LPT:
8718 		/* Disable gig during WOL */
8719 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8720 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8721 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8722 		if (sc->sc_type == WM_T_PCH)
8723 			wm_gmii_reset(sc);
8724 
8725 		/* Power down workaround */
8726 		if (sc->sc_phytype == WMPHY_82577) {
8727 			struct mii_softc *child;
8728 
8729 			/* Assume that the PHY is copper */
8730 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
8731 			if (child->mii_mpd_rev <= 2)
8732 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8733 				    (768 << 5) | 25, 0x0444); /* magic num */
8734 		}
8735 		break;
8736 	default:
8737 		break;
8738 	}
8739 
8740 	/* Keep the laser running on fiber adapters */
8741 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
	    || ((sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0)) {
8743 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8744 		reg |= CTRL_EXT_SWDPIN(3);
8745 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8746 	}
8747 
8748 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8749 #if 0	/* for the multicast packet */
8750 	reg |= WUFC_MC;
8751 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8752 #endif
8753 
8754 	if (sc->sc_type == WM_T_PCH) {
8755 		wm_enable_phy_wakeup(sc);
8756 	} else {
8757 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8758 		CSR_WRITE(sc, WMREG_WUFC, reg);
8759 	}
8760 
8761 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8762 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8763 		|| (sc->sc_type == WM_T_PCH2))
8764 		    && (sc->sc_phytype == WMPHY_IGP_3))
8765 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8766 
8767 	/* Request PME */
8768 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8769 #if 0
8770 	/* Disable WOL */
8771 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8772 #else
8773 	/* For WOL */
8774 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8775 #endif
8776 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8777 }
8778 #endif /* WM_WOL */
8779 
8780 static bool
8781 wm_suspend(device_t self, const pmf_qual_t *qual)
8782 {
8783 	struct wm_softc *sc = device_private(self);
8784 
8785 	wm_release_manageability(sc);
8786 	wm_release_hw_control(sc);
8787 #ifdef WM_WOL
8788 	wm_enable_wakeup(sc);
8789 #endif
8790 
8791 	return true;
8792 }
8793 
8794 static bool
8795 wm_resume(device_t self, const pmf_qual_t *qual)
8796 {
8797 	struct wm_softc *sc = device_private(self);
8798 
8799 	wm_init_manageability(sc);
8800 
8801 	return true;
8802 }
8803 
8804 static void
8805 wm_set_eee_i350(struct wm_softc * sc)
8806 {
8807 	uint32_t ipcnfg, eeer;
8808 
8809 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8810 	eeer = CSR_READ(sc, WMREG_EEER);
8811 
8812 	if ((sc->sc_flags & WM_F_EEE) != 0) {
8813 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8814 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8815 		    | EEER_LPI_FC);
8816 	} else {
8817 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8818 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8819 		    | EEER_LPI_FC);
8820 	}
8821 
8822 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8823 	CSR_WRITE(sc, WMREG_EEER, eeer);
8824 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8825 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8826 }
8827
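/*
 * Usage sketch (illustrative only): EEE on the I350 follows the WM_F_EEE
 * flag, so enabling it amounts to setting the flag and reapplying:
 */
#if 0
	sc->sc_flags |= WM_F_EEE;
	wm_set_eee_i350(sc);
#endif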