xref: /netbsd-src/sys/dev/pci/if_wm.c (revision 48fb7bfab72acd4281a53bbee5ccf3f809019e75)
1 /*	$NetBSD: if_wm.c,v 1.266 2014/01/07 13:14:39 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Rework how parameters are loaded from the EEPROM.
76  */
77 
78 #include <sys/cdefs.h>
79 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.266 2014/01/07 13:14:39 msaitoh Exp $");
80 
81 #include <sys/param.h>
82 #include <sys/systm.h>
83 #include <sys/callout.h>
84 #include <sys/mbuf.h>
85 #include <sys/malloc.h>
86 #include <sys/kernel.h>
87 #include <sys/socket.h>
88 #include <sys/ioctl.h>
89 #include <sys/errno.h>
90 #include <sys/device.h>
91 #include <sys/queue.h>
92 #include <sys/syslog.h>
93 
94 #include <sys/rnd.h>
95 
96 #include <net/if.h>
97 #include <net/if_dl.h>
98 #include <net/if_media.h>
99 #include <net/if_ether.h>
100 
101 #include <net/bpf.h>
102 
103 #include <netinet/in.h>			/* XXX for struct ip */
104 #include <netinet/in_systm.h>		/* XXX for struct ip */
105 #include <netinet/ip.h>			/* XXX for struct ip */
106 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
107 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
108 
109 #include <sys/bus.h>
110 #include <sys/intr.h>
111 #include <machine/endian.h>
112 
113 #include <dev/mii/mii.h>
114 #include <dev/mii/miivar.h>
115 #include <dev/mii/miidevs.h>
116 #include <dev/mii/mii_bitbang.h>
117 #include <dev/mii/ikphyreg.h>
118 #include <dev/mii/igphyreg.h>
119 #include <dev/mii/igphyvar.h>
120 #include <dev/mii/inbmphyreg.h>
121 
122 #include <dev/pci/pcireg.h>
123 #include <dev/pci/pcivar.h>
124 #include <dev/pci/pcidevs.h>
125 
126 #include <dev/pci/if_wmreg.h>
127 #include <dev/pci/if_wmvar.h>
128 
129 #ifdef WM_DEBUG
130 #define	WM_DEBUG_LINK		0x01
131 #define	WM_DEBUG_TX		0x02
132 #define	WM_DEBUG_RX		0x04
133 #define	WM_DEBUG_GMII		0x08
134 #define	WM_DEBUG_MANAGE		0x10
135 #define	WM_DEBUG_NVM		0x20
136 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
137     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
138 
139 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
140 #else
141 #define	DPRINTF(x, y)	/* nothing */
142 #endif /* WM_DEBUG */
143 
144 /*
145  * Transmit descriptor list size.  Due to errata, we can only have
146  * 256 hardware descriptors in the ring on < 82544, but we use 4096
147  * on >= 82544.  We tell the upper layers that they can queue a lot
148  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
149  * of them at a time.
150  *
151  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
152  * chains containing many small mbufs have been observed in zero-copy
153  * situations with jumbo frames.
154  */
155 #define	WM_NTXSEGS		256
156 #define	WM_IFQUEUELEN		256
157 #define	WM_TXQUEUELEN_MAX	64
158 #define	WM_TXQUEUELEN_MAX_82547	16
159 #define	WM_TXQUEUELEN(sc)	((sc)->sc_txnum)
160 #define	WM_TXQUEUELEN_MASK(sc)	(WM_TXQUEUELEN(sc) - 1)
161 #define	WM_TXQUEUE_GC(sc)	(WM_TXQUEUELEN(sc) / 8)
162 #define	WM_NTXDESC_82542	256
163 #define	WM_NTXDESC_82544	4096
164 #define	WM_NTXDESC(sc)		((sc)->sc_ntxdesc)
165 #define	WM_NTXDESC_MASK(sc)	(WM_NTXDESC(sc) - 1)
166 #define	WM_TXDESCSIZE(sc)	(WM_NTXDESC(sc) * sizeof(wiseman_txdesc_t))
167 #define	WM_NEXTTX(sc, x)	(((x) + 1) & WM_NTXDESC_MASK(sc))
168 #define	WM_NEXTTXS(sc, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(sc))
169 
170 #define	WM_MAXTXDMA		round_page(IP_MAXPACKET) /* for TSO */
171 
172 /*
173  * Receive descriptor list size.  We have one Rx buffer for normal
174  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
175  * packet.  We allocate 256 receive descriptors, each with a 2k
176  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
177  */
178 #define	WM_NRXDESC		256
179 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
180 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
181 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
182 
183 /*
184  * Control structures are DMA'd to the i82542 chip.  We allocate them in
185  * a single clump that maps to a single DMA segment to make several things
186  * easier.
187  */
188 struct wm_control_data_82544 {
189 	/*
190 	 * The receive descriptors.
191 	 */
192 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
193 
194 	/*
195 	 * The transmit descriptors.  Put these at the end, because
196 	 * we might use a smaller number of them.
197 	 */
198 	union {
199 		wiseman_txdesc_t wcdu_txdescs[WM_NTXDESC_82544];
200 		nq_txdesc_t      wcdu_nq_txdescs[WM_NTXDESC_82544];
201 	} wdc_u;
202 };
203 
204 struct wm_control_data_82542 {
205 	wiseman_rxdesc_t wcd_rxdescs[WM_NRXDESC];
206 	wiseman_txdesc_t wcd_txdescs[WM_NTXDESC_82542];
207 };
208 
209 #define	WM_CDOFF(x)	offsetof(struct wm_control_data_82544, x)
210 #define	WM_CDTXOFF(x)	WM_CDOFF(wdc_u.wcdu_txdescs[(x)])
211 #define	WM_CDRXOFF(x)	WM_CDOFF(wcd_rxdescs[(x)])
212 
213 /*
214  * Software state for transmit jobs.
215  */
216 struct wm_txsoft {
217 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
218 	bus_dmamap_t txs_dmamap;	/* our DMA map */
219 	int txs_firstdesc;		/* first descriptor in packet */
220 	int txs_lastdesc;		/* last descriptor in packet */
221 	int txs_ndesc;			/* # of descriptors used */
222 };
223 
224 /*
225  * Software state for receive buffers.  Each descriptor gets a
226  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
227  * more than one buffer, we chain them together.
228  */
229 struct wm_rxsoft {
230 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
231 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
232 };
233 
234 #define WM_LINKUP_TIMEOUT	50
235 
236 static uint16_t swfwphysem[] = {
237 	SWFW_PHY0_SM,
238 	SWFW_PHY1_SM,
239 	SWFW_PHY2_SM,
240 	SWFW_PHY3_SM
241 };
242 
243 /*
244  * Software state per device.
245  */
246 struct wm_softc {
247 	device_t sc_dev;		/* generic device information */
248 	bus_space_tag_t sc_st;		/* bus space tag */
249 	bus_space_handle_t sc_sh;	/* bus space handle */
250 	bus_size_t sc_ss;		/* bus space size */
251 	bus_space_tag_t sc_iot;		/* I/O space tag */
252 	bus_space_handle_t sc_ioh;	/* I/O space handle */
253 	bus_size_t sc_ios;		/* I/O space size */
254 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
255 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
256 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
257 
258 	struct ethercom sc_ethercom;	/* ethernet common data */
259 	struct mii_data sc_mii;		/* MII/media information */
260 
261 	pci_chipset_tag_t sc_pc;
262 	pcitag_t sc_pcitag;
263 	int sc_bus_speed;		/* PCI/PCIX bus speed */
264 	int sc_pcixe_capoff;		/* PCI[Xe] capability register offset */
265 
266 	const struct wm_product *sc_wmp; /* Pointer to the wm_product entry */
267 	wm_chip_type sc_type;		/* MAC type */
268 	int sc_rev;			/* MAC revision */
269 	wm_phy_type sc_phytype;		/* PHY type */
270 	int sc_funcid;			/* unit number of the chip (0 to 3) */
271 	int sc_flags;			/* flags; see below */
272 	int sc_if_flags;		/* last if_flags */
273 	int sc_flowflags;		/* 802.3x flow control flags */
274 	int sc_align_tweak;
275 
276 	void *sc_ih;			/* interrupt cookie */
277 	callout_t sc_tick_ch;		/* tick callout */
278 
279 	int sc_ee_addrbits;		/* EEPROM address bits */
280 	int sc_ich8_flash_base;
281 	int sc_ich8_flash_bank_size;
282 	int sc_nvm_k1_enabled;
283 
284 	/*
285 	 * Software state for the transmit and receive descriptors.
286 	 */
287 	int sc_txnum;			/* must be a power of two */
288 	struct wm_txsoft sc_txsoft[WM_TXQUEUELEN_MAX];
289 	struct wm_rxsoft sc_rxsoft[WM_NRXDESC];
290 
291 	/*
292 	 * Control data structures.
293 	 */
294 	int sc_ntxdesc;			/* must be a power of two */
295 	struct wm_control_data_82544 *sc_control_data;
296 	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
297 	bus_dma_segment_t sc_cd_seg;	/* control data segment */
298 	int sc_cd_rseg;			/* real number of control segment */
299 	size_t sc_cd_size;		/* control data size */
300 #define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr
301 #define	sc_txdescs	sc_control_data->wdc_u.wcdu_txdescs
302 #define	sc_nq_txdescs	sc_control_data->wdc_u.wcdu_nq_txdescs
303 #define	sc_rxdescs	sc_control_data->wcd_rxdescs
304 
305 #ifdef WM_EVENT_COUNTERS
306 	/* Event counters. */
307 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
308 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
309 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
310 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
311 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
312 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
313 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
314 
315 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
316 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
317 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
318 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
319 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
320 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
321 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
322 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
323 
324 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
325 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
326 
327 	struct evcnt sc_ev_tu;		/* Tx underrun */
328 
329 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
330 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
331 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
332 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
333 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
334 #endif /* WM_EVENT_COUNTERS */
335 
336 	bus_addr_t sc_tdt_reg;		/* offset of TDT register */
337 
338 	int	sc_txfree;		/* number of free Tx descriptors */
339 	int	sc_txnext;		/* next ready Tx descriptor */
340 
341 	int	sc_txsfree;		/* number of free Tx jobs */
342 	int	sc_txsnext;		/* next free Tx job */
343 	int	sc_txsdirty;		/* dirty Tx jobs */
344 
345 	/* These 5 variables are used only on the 82547. */
346 	int	sc_txfifo_size;		/* Tx FIFO size */
347 	int	sc_txfifo_head;		/* current head of FIFO */
348 	uint32_t sc_txfifo_addr;	/* internal address of start of FIFO */
349 	int	sc_txfifo_stall;	/* Tx FIFO is stalled */
350 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
351 
352 	bus_addr_t sc_rdt_reg;		/* offset of RDT register */
353 
354 	int	sc_rxptr;		/* next ready Rx descriptor/queue ent */
355 	int	sc_rxdiscard;
356 	int	sc_rxlen;
357 	struct mbuf *sc_rxhead;
358 	struct mbuf *sc_rxtail;
359 	struct mbuf **sc_rxtailp;
360 
361 	uint32_t sc_ctrl;		/* prototype CTRL register */
362 #if 0
363 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
364 #endif
365 	uint32_t sc_icr;		/* prototype interrupt bits */
366 	uint32_t sc_itr;		/* prototype intr throttling reg */
367 	uint32_t sc_tctl;		/* prototype TCTL register */
368 	uint32_t sc_rctl;		/* prototype RCTL register */
369 	uint32_t sc_txcw;		/* prototype TXCW register */
370 	uint32_t sc_tipg;		/* prototype TIPG register */
371 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
372 	uint32_t sc_pba;		/* prototype PBA register */
373 
374 	int sc_tbi_linkup;		/* TBI link status */
375 	int sc_tbi_anegticks;		/* autonegotiation ticks */
376 	int sc_tbi_ticks;		/* tbi ticks */
377 	int sc_tbi_nrxcfg;		/* count of ICR_RXCFG */
378 	int sc_tbi_lastnrxcfg;		/* count of ICR_RXCFG (on last tick) */
379 
380 	int sc_mchash_type;		/* multicast filter offset */
381 
382 	krndsource_t rnd_source;	/* random source */
383 };
384 
385 #define	WM_RXCHAIN_RESET(sc)						\
386 do {									\
387 	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
388 	*(sc)->sc_rxtailp = NULL;					\
389 	(sc)->sc_rxlen = 0;						\
390 } while (/*CONSTCOND*/0)
391 
392 #define	WM_RXCHAIN_LINK(sc, m)						\
393 do {									\
394 	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
395 	(sc)->sc_rxtailp = &(m)->m_next;				\
396 } while (/*CONSTCOND*/0)
397 
398 #ifdef WM_EVENT_COUNTERS
399 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
400 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
401 #else
402 #define	WM_EVCNT_INCR(ev)	/* nothing */
403 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
404 #endif
405 
406 #define	CSR_READ(sc, reg)						\
407 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
408 #define	CSR_WRITE(sc, reg, val)						\
409 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
410 #define	CSR_WRITE_FLUSH(sc)						\
411 	(void) CSR_READ((sc), WMREG_STATUS)
412 
413 #define ICH8_FLASH_READ32(sc, reg) \
414 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
415 #define ICH8_FLASH_WRITE32(sc, reg, data) \
416 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
417 
418 #define ICH8_FLASH_READ16(sc, reg) \
419 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
420 #define ICH8_FLASH_WRITE16(sc, reg, data) \
421 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
422 
423 #define	WM_CDTXADDR(sc, x)	((sc)->sc_cddma + WM_CDTXOFF((x)))
424 #define	WM_CDRXADDR(sc, x)	((sc)->sc_cddma + WM_CDRXOFF((x)))
425 
426 #define	WM_CDTXADDR_LO(sc, x)	(WM_CDTXADDR((sc), (x)) & 0xffffffffU)
427 #define	WM_CDTXADDR_HI(sc, x)						\
428 	(sizeof(bus_addr_t) == 8 ?					\
429 	 (uint64_t)WM_CDTXADDR((sc), (x)) >> 32 : 0)
430 
431 #define	WM_CDRXADDR_LO(sc, x)	(WM_CDRXADDR((sc), (x)) & 0xffffffffU)
432 #define	WM_CDRXADDR_HI(sc, x)						\
433 	(sizeof(bus_addr_t) == 8 ?					\
434 	 (uint64_t)WM_CDRXADDR((sc), (x)) >> 32 : 0)
435 
436 #define	WM_CDTXSYNC(sc, x, n, ops)					\
437 do {									\
438 	int __x, __n;							\
439 									\
440 	__x = (x);							\
441 	__n = (n);							\
442 									\
443 	/* If it will wrap around, sync to the end of the ring. */	\
444 	if ((__x + __n) > WM_NTXDESC(sc)) {				\
445 		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
446 		    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) *		\
447 		    (WM_NTXDESC(sc) - __x), (ops));			\
448 		__n -= (WM_NTXDESC(sc) - __x);				\
449 		__x = 0;						\
450 	}								\
451 									\
452 	/* Now sync whatever is left. */				\
453 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
454 	    WM_CDTXOFF(__x), sizeof(wiseman_txdesc_t) * __n, (ops));	\
455 } while (/*CONSTCOND*/0)
456 
457 #define	WM_CDRXSYNC(sc, x, ops)						\
458 do {									\
459 	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
460 	   WM_CDRXOFF((x)), sizeof(wiseman_rxdesc_t), (ops));		\
461 } while (/*CONSTCOND*/0)
462 
463 #define	WM_INIT_RXDESC(sc, x)						\
464 do {									\
465 	struct wm_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
466 	wiseman_rxdesc_t *__rxd = &(sc)->sc_rxdescs[(x)];		\
467 	struct mbuf *__m = __rxs->rxs_mbuf;				\
468 									\
469 	/*								\
470 	 * Note: We scoot the packet forward 2 bytes in the buffer	\
471 	 * so that the payload after the Ethernet header is aligned	\
472 	 * to a 4-byte boundary.					\
473 	 *								\
474 	 * XXX BRAINDAMAGE ALERT!					\
475 	 * The stupid chip uses the same size for every buffer, which	\
476 	 * is set in the Receive Control register.  We are using the 2K	\
477 	 * size option, but what we REALLY want is (2K - 2)!  For this	\
478 	 * reason, we can't "scoot" packets longer than the standard	\
479 	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
480 	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
481 	 * the upper layer copy the headers.				\
482 	 */								\
483 	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
484 									\
485 	wm_set_dma_addr(&__rxd->wrx_addr,				\
486 	    __rxs->rxs_dmamap->dm_segs[0].ds_addr + (sc)->sc_align_tweak); \
487 	__rxd->wrx_len = 0;						\
488 	__rxd->wrx_cksum = 0;						\
489 	__rxd->wrx_status = 0;						\
490 	__rxd->wrx_errors = 0;						\
491 	__rxd->wrx_special = 0;						\
492 	WM_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
493 									\
494 	CSR_WRITE((sc), (sc)->sc_rdt_reg, (x));				\
495 } while (/*CONSTCOND*/0)
496 
497 static void	wm_start(struct ifnet *);
498 static void	wm_nq_start(struct ifnet *);
499 static void	wm_watchdog(struct ifnet *);
500 static int	wm_ifflags_cb(struct ethercom *);
501 static int	wm_ioctl(struct ifnet *, u_long, void *);
502 static int	wm_init(struct ifnet *);
503 static void	wm_stop(struct ifnet *, int);
504 static bool	wm_suspend(device_t, const pmf_qual_t *);
505 static bool	wm_resume(device_t, const pmf_qual_t *);
506 
507 static void	wm_reset(struct wm_softc *);
508 static void	wm_rxdrain(struct wm_softc *);
509 static int	wm_add_rxbuf(struct wm_softc *, int);
510 static int	wm_read_eeprom(struct wm_softc *, int, int, u_int16_t *);
511 static int	wm_read_eeprom_eerd(struct wm_softc *, int, int, u_int16_t *);
512 static int	wm_validate_eeprom_checksum(struct wm_softc *);
513 static int	wm_check_alt_mac_addr(struct wm_softc *);
514 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
515 static void	wm_tick(void *);
516 
517 static void	wm_set_filter(struct wm_softc *);
518 static void	wm_set_vlan(struct wm_softc *);
519 
520 static int	wm_intr(void *);
521 static void	wm_txintr(struct wm_softc *);
522 static void	wm_rxintr(struct wm_softc *);
523 static void	wm_linkintr(struct wm_softc *, uint32_t);
524 
525 static void	wm_tbi_mediainit(struct wm_softc *);
526 static int	wm_tbi_mediachange(struct ifnet *);
527 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
528 
529 static void	wm_tbi_set_linkled(struct wm_softc *);
530 static void	wm_tbi_check_link(struct wm_softc *);
531 
532 static void	wm_gmii_reset(struct wm_softc *);
533 
534 static int	wm_gmii_i82543_readreg(device_t, int, int);
535 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
536 static int	wm_gmii_i82544_readreg(device_t, int, int);
537 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
538 static int	wm_gmii_i80003_readreg(device_t, int, int);
539 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
540 static int	wm_gmii_bm_readreg(device_t, int, int);
541 static void	wm_gmii_bm_writereg(device_t, int, int, int);
542 static int	wm_gmii_hv_readreg(device_t, int, int);
543 static void	wm_gmii_hv_writereg(device_t, int, int, int);
544 static int	wm_gmii_82580_readreg(device_t, int, int);
545 static void	wm_gmii_82580_writereg(device_t, int, int, int);
546 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
547 static int	wm_sgmii_readreg(device_t, int, int);
548 static void	wm_sgmii_writereg(device_t, int, int, int);
549 
550 static void	wm_gmii_statchg(struct ifnet *);
551 
552 static int	wm_get_phy_id_82575(struct wm_softc *);
553 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
554 static int	wm_gmii_mediachange(struct ifnet *);
555 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
556 
557 static int	wm_kmrn_readreg(struct wm_softc *, int);
558 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
559 
560 static void	wm_set_spiaddrbits(struct wm_softc *);
561 static int	wm_match(device_t, cfdata_t, void *);
562 static void	wm_attach(device_t, device_t, void *);
563 static int	wm_detach(device_t, int);
564 static int	wm_is_onboard_nvm_eeprom(struct wm_softc *);
565 static void	wm_get_auto_rd_done(struct wm_softc *);
566 static void	wm_lan_init_done(struct wm_softc *);
567 static void	wm_get_cfg_done(struct wm_softc *);
568 static int	wm_get_swsm_semaphore(struct wm_softc *);
569 static void	wm_put_swsm_semaphore(struct wm_softc *);
570 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
571 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
572 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
573 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
574 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
575 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
576 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
577 
578 static int	wm_read_eeprom_ich8(struct wm_softc *, int, int, uint16_t *);
579 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
580 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
581 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t,
582 		     uint32_t, uint16_t *);
583 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
584 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
585 static void	wm_82547_txfifo_stall(void *);
586 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
587 static int	wm_check_mng_mode(struct wm_softc *);
588 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
589 static int	wm_check_mng_mode_82574(struct wm_softc *);
590 static int	wm_check_mng_mode_generic(struct wm_softc *);
591 static int	wm_enable_mng_pass_thru(struct wm_softc *);
592 static int	wm_check_reset_block(struct wm_softc *);
593 static void	wm_get_hw_control(struct wm_softc *);
594 static int	wm_check_for_link(struct wm_softc *);
595 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
596 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
597 #ifdef WM_WOL
598 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
599 #endif
600 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
601 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
602 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
603 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
604 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
605 static void	wm_smbustopci(struct wm_softc *);
606 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
607 static void	wm_reset_init_script_82575(struct wm_softc *);
608 static void	wm_release_manageability(struct wm_softc *);
609 static void	wm_release_hw_control(struct wm_softc *);
610 static void	wm_get_wakeup(struct wm_softc *);
611 #ifdef WM_WOL
612 static void	wm_enable_phy_wakeup(struct wm_softc *);
613 static void	wm_enable_wakeup(struct wm_softc *);
614 #endif
615 static void	wm_init_manageability(struct wm_softc *);
616 static void	wm_set_eee_i350(struct wm_softc *);
617 
618 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
619     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
620 
621 /*
622  * Devices supported by this driver.
623  */
624 static const struct wm_product {
625 	pci_vendor_id_t		wmp_vendor;
626 	pci_product_id_t	wmp_product;
627 	const char		*wmp_name;
628 	wm_chip_type		wmp_type;
629 	int			wmp_flags;
630 #define	WMP_F_1000X		0x01
631 #define	WMP_F_1000T		0x02
632 #define	WMP_F_SERDES		0x04
633 } wm_products[] = {
634 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
635 	  "Intel i82542 1000BASE-X Ethernet",
636 	  WM_T_82542_2_1,	WMP_F_1000X },
637 
638 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
639 	  "Intel i82543GC 1000BASE-X Ethernet",
640 	  WM_T_82543,		WMP_F_1000X },
641 
642 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
643 	  "Intel i82543GC 1000BASE-T Ethernet",
644 	  WM_T_82543,		WMP_F_1000T },
645 
646 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
647 	  "Intel i82544EI 1000BASE-T Ethernet",
648 	  WM_T_82544,		WMP_F_1000T },
649 
650 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
651 	  "Intel i82544EI 1000BASE-X Ethernet",
652 	  WM_T_82544,		WMP_F_1000X },
653 
654 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
655 	  "Intel i82544GC 1000BASE-T Ethernet",
656 	  WM_T_82544,		WMP_F_1000T },
657 
658 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
659 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
660 	  WM_T_82544,		WMP_F_1000T },
661 
662 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
663 	  "Intel i82540EM 1000BASE-T Ethernet",
664 	  WM_T_82540,		WMP_F_1000T },
665 
666 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
667 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
668 	  WM_T_82540,		WMP_F_1000T },
669 
670 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
671 	  "Intel i82540EP 1000BASE-T Ethernet",
672 	  WM_T_82540,		WMP_F_1000T },
673 
674 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
675 	  "Intel i82540EP 1000BASE-T Ethernet",
676 	  WM_T_82540,		WMP_F_1000T },
677 
678 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
679 	  "Intel i82540EP 1000BASE-T Ethernet",
680 	  WM_T_82540,		WMP_F_1000T },
681 
682 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
683 	  "Intel i82545EM 1000BASE-T Ethernet",
684 	  WM_T_82545,		WMP_F_1000T },
685 
686 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
687 	  "Intel i82545GM 1000BASE-T Ethernet",
688 	  WM_T_82545_3,		WMP_F_1000T },
689 
690 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
691 	  "Intel i82545GM 1000BASE-X Ethernet",
692 	  WM_T_82545_3,		WMP_F_1000X },
693 #if 0
694 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
695 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
696 	  WM_T_82545_3,		WMP_F_SERDES },
697 #endif
698 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
699 	  "Intel i82546EB 1000BASE-T Ethernet",
700 	  WM_T_82546,		WMP_F_1000T },
701 
702 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
703 	  "Intel i82546EB 1000BASE-T Ethernet",
704 	  WM_T_82546,		WMP_F_1000T },
705 
706 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
707 	  "Intel i82545EM 1000BASE-X Ethernet",
708 	  WM_T_82545,		WMP_F_1000X },
709 
710 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
711 	  "Intel i82546EB 1000BASE-X Ethernet",
712 	  WM_T_82546,		WMP_F_1000X },
713 
714 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
715 	  "Intel i82546GB 1000BASE-T Ethernet",
716 	  WM_T_82546_3,		WMP_F_1000T },
717 
718 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
719 	  "Intel i82546GB 1000BASE-X Ethernet",
720 	  WM_T_82546_3,		WMP_F_1000X },
721 #if 0
722 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
723 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
724 	  WM_T_82546_3,		WMP_F_SERDES },
725 #endif
726 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
727 	  "i82546GB quad-port Gigabit Ethernet",
728 	  WM_T_82546_3,		WMP_F_1000T },
729 
730 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
731 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
732 	  WM_T_82546_3,		WMP_F_1000T },
733 
734 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
735 	  "Intel PRO/1000MT (82546GB)",
736 	  WM_T_82546_3,		WMP_F_1000T },
737 
738 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
739 	  "Intel i82541EI 1000BASE-T Ethernet",
740 	  WM_T_82541,		WMP_F_1000T },
741 
742 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
743 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
744 	  WM_T_82541,		WMP_F_1000T },
745 
746 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
747 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
748 	  WM_T_82541,		WMP_F_1000T },
749 
750 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
751 	  "Intel i82541ER 1000BASE-T Ethernet",
752 	  WM_T_82541_2,		WMP_F_1000T },
753 
754 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
755 	  "Intel i82541GI 1000BASE-T Ethernet",
756 	  WM_T_82541_2,		WMP_F_1000T },
757 
758 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
759 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
760 	  WM_T_82541_2,		WMP_F_1000T },
761 
762 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
763 	  "Intel i82541PI 1000BASE-T Ethernet",
764 	  WM_T_82541_2,		WMP_F_1000T },
765 
766 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
767 	  "Intel i82547EI 1000BASE-T Ethernet",
768 	  WM_T_82547,		WMP_F_1000T },
769 
770 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
771 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
772 	  WM_T_82547,		WMP_F_1000T },
773 
774 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
775 	  "Intel i82547GI 1000BASE-T Ethernet",
776 	  WM_T_82547_2,		WMP_F_1000T },
777 
778 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
779 	  "Intel PRO/1000 PT (82571EB)",
780 	  WM_T_82571,		WMP_F_1000T },
781 
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
783 	  "Intel PRO/1000 PF (82571EB)",
784 	  WM_T_82571,		WMP_F_1000X },
785 #if 0
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
787 	  "Intel PRO/1000 PB (82571EB)",
788 	  WM_T_82571,		WMP_F_SERDES },
789 #endif
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
791 	  "Intel PRO/1000 QT (82571EB)",
792 	  WM_T_82571,		WMP_F_1000T },
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
795 	  "Intel i82572EI 1000baseT Ethernet",
796 	  WM_T_82572,		WMP_F_1000T },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
799 	  "Intel PRO/1000 PT Quad Port Server Adapter",
800 	  WM_T_82571,		WMP_F_1000T, },
801 
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
803 	  "Intel i82572EI 1000baseX Ethernet",
804 	  WM_T_82572,		WMP_F_1000X },
805 #if 0
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
807 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
808 	  WM_T_82572,		WMP_F_SERDES },
809 #endif
810 
811 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
812 	  "Intel i82572EI 1000baseT Ethernet",
813 	  WM_T_82572,		WMP_F_1000T },
814 
815 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
816 	  "Intel i82573E",
817 	  WM_T_82573,		WMP_F_1000T },
818 
819 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
820 	  "Intel i82573E IAMT",
821 	  WM_T_82573,		WMP_F_1000T },
822 
823 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
824 	  "Intel i82573L Gigabit Ethernet",
825 	  WM_T_82573,		WMP_F_1000T },
826 
827 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
828 	  "Intel i82574L",
829 	  WM_T_82574,		WMP_F_1000T },
830 
831 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
832 	  "Intel i82583V",
833 	  WM_T_82583,		WMP_F_1000T },
834 
835 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
836 	  "i80003 dual 1000baseT Ethernet",
837 	  WM_T_80003,		WMP_F_1000T },
838 
839 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
840 	  "i80003 dual 1000baseX Ethernet",
841 	  WM_T_80003,		WMP_F_1000T },
842 #if 0
843 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
844 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
845 	  WM_T_80003,		WMP_F_SERDES },
846 #endif
847 
848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
849 	  "Intel i80003 1000baseT Ethernet",
850 	  WM_T_80003,		WMP_F_1000T },
851 #if 0
852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
853 	  "Intel i80003 Gigabit Ethernet (SERDES)",
854 	  WM_T_80003,		WMP_F_SERDES },
855 #endif
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
857 	  "Intel i82801H (M_AMT) LAN Controller",
858 	  WM_T_ICH8,		WMP_F_1000T },
859 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
860 	  "Intel i82801H (AMT) LAN Controller",
861 	  WM_T_ICH8,		WMP_F_1000T },
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
863 	  "Intel i82801H LAN Controller",
864 	  WM_T_ICH8,		WMP_F_1000T },
865 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
866 	  "Intel i82801H (IFE) LAN Controller",
867 	  WM_T_ICH8,		WMP_F_1000T },
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
869 	  "Intel i82801H (M) LAN Controller",
870 	  WM_T_ICH8,		WMP_F_1000T },
871 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
872 	  "Intel i82801H IFE (GT) LAN Controller",
873 	  WM_T_ICH8,		WMP_F_1000T },
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
875 	  "Intel i82801H IFE (G) LAN Controller",
876 	  WM_T_ICH8,		WMP_F_1000T },
877 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
878 	  "82801I (AMT) LAN Controller",
879 	  WM_T_ICH9,		WMP_F_1000T },
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
881 	  "82801I LAN Controller",
882 	  WM_T_ICH9,		WMP_F_1000T },
883 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
884 	  "82801I (G) LAN Controller",
885 	  WM_T_ICH9,		WMP_F_1000T },
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
887 	  "82801I (GT) LAN Controller",
888 	  WM_T_ICH9,		WMP_F_1000T },
889 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
890 	  "82801I (C) LAN Controller",
891 	  WM_T_ICH9,		WMP_F_1000T },
892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
893 	  "82801I mobile LAN Controller",
894 	  WM_T_ICH9,		WMP_F_1000T },
895 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
896 	  "82801I mobile (V) LAN Controller",
897 	  WM_T_ICH9,		WMP_F_1000T },
898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
899 	  "82801I mobile (AMT) LAN Controller",
900 	  WM_T_ICH9,		WMP_F_1000T },
901 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
902 	  "82567LM-4 LAN Controller",
903 	  WM_T_ICH9,		WMP_F_1000T },
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
905 	  "82567V-3 LAN Controller",
906 	  WM_T_ICH9,		WMP_F_1000T },
907 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
908 	  "82567LM-2 LAN Controller",
909 	  WM_T_ICH10,		WMP_F_1000T },
910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
911 	  "82567LF-2 LAN Controller",
912 	  WM_T_ICH10,		WMP_F_1000T },
913 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
914 	  "82567LM-3 LAN Controller",
915 	  WM_T_ICH10,		WMP_F_1000T },
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
917 	  "82567LF-3 LAN Controller",
918 	  WM_T_ICH10,		WMP_F_1000T },
919 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
920 	  "82567V-2 LAN Controller",
921 	  WM_T_ICH10,		WMP_F_1000T },
922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
923 	  "82567V-3? LAN Controller",
924 	  WM_T_ICH10,		WMP_F_1000T },
925 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
926 	  "HANKSVILLE LAN Controller",
927 	  WM_T_ICH10,		WMP_F_1000T },
928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
929 	  "PCH LAN (82577LM) Controller",
930 	  WM_T_PCH,		WMP_F_1000T },
931 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
932 	  "PCH LAN (82577LC) Controller",
933 	  WM_T_PCH,		WMP_F_1000T },
934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
935 	  "PCH LAN (82578DM) Controller",
936 	  WM_T_PCH,		WMP_F_1000T },
937 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
938 	  "PCH LAN (82578DC) Controller",
939 	  WM_T_PCH,		WMP_F_1000T },
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
941 	  "PCH2 LAN (82579LM) Controller",
942 	  WM_T_PCH2,		WMP_F_1000T },
943 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
944 	  "PCH2 LAN (82579V) Controller",
945 	  WM_T_PCH2,		WMP_F_1000T },
946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
947 	  "82575EB dual-1000baseT Ethernet",
948 	  WM_T_82575,		WMP_F_1000T },
949 #if 0
950 	/*
951 	 * not sure if WMP_F_1000X or WMP_F_SERDES - we do not have it - so
952 	 * disabled for now ...
953 	 */
954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
955 	  "82575EB dual-1000baseX Ethernet (SERDES)",
956 	  WM_T_82575,		WMP_F_SERDES },
957 #endif
958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
959 	  "82575GB quad-1000baseT Ethernet",
960 	  WM_T_82575,		WMP_F_1000T },
961 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
962 	  "82575GB quad-1000baseT Ethernet (PM)",
963 	  WM_T_82575,		WMP_F_1000T },
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
965 	  "82576 1000BaseT Ethernet",
966 	  WM_T_82576,		WMP_F_1000T },
967 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
968 	  "82576 1000BaseX Ethernet",
969 	  WM_T_82576,		WMP_F_1000X },
970 #if 0
971 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
972 	  "82576 gigabit Ethernet (SERDES)",
973 	  WM_T_82576,		WMP_F_SERDES },
974 #endif
975 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
976 	  "82576 quad-1000BaseT Ethernet",
977 	  WM_T_82576,		WMP_F_1000T },
978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
979 	  "82576 gigabit Ethernet",
980 	  WM_T_82576,		WMP_F_1000T },
981 #if 0
982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
983 	  "82576 gigabit Ethernet (SERDES)",
984 	  WM_T_82576,		WMP_F_SERDES },
985 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
986 	  "82576 quad-gigabit Ethernet (SERDES)",
987 	  WM_T_82576,		WMP_F_SERDES },
988 #endif
989 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
990 	  "82580 1000BaseT Ethernet",
991 	  WM_T_82580,		WMP_F_1000T },
992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
993 	  "82580 1000BaseX Ethernet",
994 	  WM_T_82580,		WMP_F_1000X },
995 #if 0
996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
997 	  "82580 1000BaseT Ethernet (SERDES)",
998 	  WM_T_82580,		WMP_F_SERDES },
999 #endif
1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1001 	  "82580 gigabit Ethernet (SGMII)",
1002 	  WM_T_82580,		WMP_F_1000T },
1003 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1004 	  "82580 dual-1000BaseT Ethernet",
1005 	  WM_T_82580,		WMP_F_1000T },
1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER,
1007 	  "82580 1000BaseT Ethernet",
1008 	  WM_T_82580ER,		WMP_F_1000T },
1009 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_ER_DUAL,
1010 	  "82580 dual-1000BaseT Ethernet",
1011 	  WM_T_82580ER,		WMP_F_1000T },
1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1013 	  "82580 quad-1000BaseX Ethernet",
1014 	  WM_T_82580,		WMP_F_1000X },
1015 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1016 	  "I350 Gigabit Network Connection",
1017 	  WM_T_I350,		WMP_F_1000T },
1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1019 	  "I350 Gigabit Fiber Network Connection",
1020 	  WM_T_I350,		WMP_F_1000X },
1021 #if 0
1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1023 	  "I350 Gigabit Backplane Connection",
1024 	  WM_T_I350,		WMP_F_SERDES },
1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1026 	  "I350 Gigabit Connection",
1027 	  WM_T_I350,		WMP_F_1000T },
1028 #endif
1029 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1030 	  "I354 Gigabit Connection",
1031 	  WM_T_I354,		WMP_F_1000T },
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1033 	  "I210-T1 Ethernet Server Adapter",
1034 	  WM_T_I210,		WMP_F_1000T },
1035 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1036 	  "I210 Ethernet (Copper OEM)",
1037 	  WM_T_I210,		WMP_F_1000T },
1038 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1039 	  "I210 Ethernet (Copper IT)",
1040 	  WM_T_I210,		WMP_F_1000T },
1041 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1042 	  "I210 Gigabit Ethernet (Fiber)",
1043 	  WM_T_I210,		WMP_F_1000X },
1044 #if 0
1045 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1046 	  "I210 Gigabit Ethernet (SERDES)",
1047 	  WM_T_I210,		WMP_F_SERDES },
1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1049 	  "I210 Gigabit Ethernet (SGMII)",
1050 	  WM_T_I210,		WMP_F_SERDES },
1051 #endif
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1053 	  "I211 Ethernet (COPPER)",
1054 	  WM_T_I211,		WMP_F_1000T },
1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1056 	  "I217 V Ethernet Connection",
1057 	  WM_T_PCH_LPT,		WMP_F_1000T },
1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1059 	  "I217 LM Ethernet Connection",
1060 	  WM_T_PCH_LPT,		WMP_F_1000T },
1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1062 	  "I218 V Ethernet Connection",
1063 	  WM_T_PCH_LPT,		WMP_F_1000T },
1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1065 	  "I218 LM Ethernet Connection",
1066 	  WM_T_PCH_LPT,		WMP_F_1000T },
1067 	{ 0,			0,
1068 	  NULL,
1069 	  0,			0 },
1070 };
1071 
1072 #ifdef WM_EVENT_COUNTERS
1073 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1074 #endif /* WM_EVENT_COUNTERS */
1075 
1076 #if 0 /* Not currently used */
1077 static inline uint32_t
1078 wm_io_read(struct wm_softc *sc, int reg)
1079 {
1080 
1081 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1082 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1083 }
1084 #endif
1085 
1086 static inline void
1087 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1088 {
1089 
1090 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1091 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1092 }
1093 
1094 static inline void
1095 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1096     uint32_t data)
1097 {
1098 	uint32_t regval;
1099 	int i;
1100 
1101 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1102 
1103 	CSR_WRITE(sc, reg, regval);
1104 
1105 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1106 		delay(5);
1107 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1108 			break;
1109 	}
1110 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1111 		aprint_error("%s: WARNING: i82575 reg 0x%08x setup did not indicate ready\n",
1112 		    device_xname(sc->sc_dev), reg);
1113 	}
1114 }
1115 
1116 static inline void
1117 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1118 {
1119 	wa->wa_low = htole32(v & 0xffffffffU);
1120 	if (sizeof(bus_addr_t) == 8)
1121 		wa->wa_high = htole32((uint64_t) v >> 32);
1122 	else
1123 		wa->wa_high = 0;
1124 }
1125 
1126 static void
1127 wm_set_spiaddrbits(struct wm_softc *sc)
1128 {
1129 	uint32_t reg;
1130 
1131 	sc->sc_flags |= WM_F_EEPROM_SPI;
1132 	reg = CSR_READ(sc, WMREG_EECD);
1133 	sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
1134 }
1135 
1136 static const struct wm_product *
1137 wm_lookup(const struct pci_attach_args *pa)
1138 {
1139 	const struct wm_product *wmp;
1140 
1141 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1142 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1143 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1144 			return wmp;
1145 	}
1146 	return NULL;
1147 }
1148 
1149 static int
1150 wm_match(device_t parent, cfdata_t cf, void *aux)
1151 {
1152 	struct pci_attach_args *pa = aux;
1153 
1154 	if (wm_lookup(pa) != NULL)
1155 		return 1;
1156 
1157 	return 0;
1158 }
1159 
1160 static void
1161 wm_attach(device_t parent, device_t self, void *aux)
1162 {
1163 	struct wm_softc *sc = device_private(self);
1164 	struct pci_attach_args *pa = aux;
1165 	prop_dictionary_t dict;
1166 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1167 	pci_chipset_tag_t pc = pa->pa_pc;
1168 	pci_intr_handle_t ih;
1169 	const char *intrstr = NULL;
1170 	const char *eetype, *xname;
1171 	bus_space_tag_t memt;
1172 	bus_space_handle_t memh;
1173 	bus_size_t memsize;
1174 	int memh_valid;
1175 	int i, error;
1176 	const struct wm_product *wmp;
1177 	prop_data_t ea;
1178 	prop_number_t pn;
1179 	uint8_t enaddr[ETHER_ADDR_LEN];
1180 	uint16_t cfg1, cfg2, swdpin, io3;
1181 	pcireg_t preg, memtype;
1182 	uint16_t eeprom_data, apme_mask;
1183 	uint32_t reg;
1184 
1185 	sc->sc_dev = self;
1186 	callout_init(&sc->sc_tick_ch, 0);
1187 
1188 	sc->sc_wmp = wmp = wm_lookup(pa);
1189 	if (wmp == NULL) {
1190 		printf("\n");
1191 		panic("wm_attach: impossible");
1192 	}
1193 
1194 	sc->sc_pc = pa->pa_pc;
1195 	sc->sc_pcitag = pa->pa_tag;
1196 
1197 	if (pci_dma64_available(pa))
1198 		sc->sc_dmat = pa->pa_dmat64;
1199 	else
1200 		sc->sc_dmat = pa->pa_dmat;
1201 
1202 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1203 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1204 
1205 	sc->sc_type = wmp->wmp_type;
1206 	if (sc->sc_type < WM_T_82543) {
1207 		if (sc->sc_rev < 2) {
1208 			aprint_error_dev(sc->sc_dev,
1209 			    "i82542 must be at least rev. 2\n");
1210 			return;
1211 		}
1212 		if (sc->sc_rev < 3)
1213 			sc->sc_type = WM_T_82542_2_0;
1214 	}
1215 
1216 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1217 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1218 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1219 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1220 		sc->sc_flags |= WM_F_NEWQUEUE;
1221 
1222 	/* Set device properties (mactype) */
1223 	dict = device_properties(sc->sc_dev);
1224 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1225 
1226 	/*
1227 	 * Map the device.  All devices support memory-mapped acccess,
1228 	 * and it is really required for normal operation.
1229 	 */
1230 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1231 	switch (memtype) {
1232 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1233 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1234 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1235 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1236 		break;
1237 	default:
1238 		memh_valid = 0;
1239 		break;
1240 	}
1241 
1242 	if (memh_valid) {
1243 		sc->sc_st = memt;
1244 		sc->sc_sh = memh;
1245 		sc->sc_ss = memsize;
1246 	} else {
1247 		aprint_error_dev(sc->sc_dev,
1248 		    "unable to map device registers\n");
1249 		return;
1250 	}
1251 
1252 	/*
1253 	 * In addition, i82544 and later support I/O mapped indirect
1254 	 * register access.  It is not desirable (nor supported in
1255 	 * this driver) to use it for normal operation, though it is
1256 	 * required to work around bugs in some chip versions.
1257 	 */
1258 	if (sc->sc_type >= WM_T_82544) {
1259 		/* First we have to find the I/O BAR. */
1260 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1261 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1262 			if (memtype == PCI_MAPREG_TYPE_IO)
1263 				break;
1264 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1265 			    PCI_MAPREG_MEM_TYPE_64BIT)
1266 				i += 4;	/* skip high bits, too */
1267 		}
1268 		if (i < PCI_MAPREG_END) {
1269 			/*
1270 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1271 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1272 			 * It's no problem because newer chips has no this
1273 			 * bug.
1274 			 *
1275 			 * The i8254x doesn't apparently respond when the
1276 			 * I/O BAR is 0, which looks somewhat like it's not
1277 			 * been configured.
1278 			 */
1279 			preg = pci_conf_read(pc, pa->pa_tag, i);
1280 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1281 				aprint_error_dev(sc->sc_dev,
1282 				    "WARNING: I/O BAR at zero.\n");
1283 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1284 					0, &sc->sc_iot, &sc->sc_ioh,
1285 					NULL, &sc->sc_ios) == 0) {
1286 				sc->sc_flags |= WM_F_IOH_VALID;
1287 			} else {
1288 				aprint_error_dev(sc->sc_dev,
1289 				    "WARNING: unable to map I/O space\n");
1290 			}
1291 		}
1292 
1293 	}
1294 
1295 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1296 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1297 	preg |= PCI_COMMAND_MASTER_ENABLE;
1298 	if (sc->sc_type < WM_T_82542_2_1)
1299 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1300 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1301 
1302 	/* power up chip */
1303 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1304 	    NULL)) && error != EOPNOTSUPP) {
1305 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1306 		return;
1307 	}
1308 
1309 	/*
1310 	 * Map and establish our interrupt.
1311 	 */
1312 	if (pci_intr_map(pa, &ih)) {
1313 		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
1314 		return;
1315 	}
1316 	intrstr = pci_intr_string(pc, ih);
1317 	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, wm_intr, sc);
1318 	if (sc->sc_ih == NULL) {
1319 		aprint_error_dev(sc->sc_dev, "unable to establish interrupt");
1320 		if (intrstr != NULL)
1321 			aprint_error(" at %s", intrstr);
1322 		aprint_error("\n");
1323 		return;
1324 	}
1325 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
1326 
1327 	/*
1328 	 * Check the function ID (unit number of the chip).
1329 	 */
1330 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1331 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1332 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1333 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
1334 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1335 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1336 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1337 	else
1338 		sc->sc_funcid = 0;
1339 
1340 	/*
1341 	 * Determine a few things about the bus we're connected to.
1342 	 */
1343 	if (sc->sc_type < WM_T_82543) {
1344 		/* We don't really know the bus characteristics here. */
1345 		sc->sc_bus_speed = 33;
1346 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1347 		/*
1348 		 * CSA (Communication Streaming Architecture) is about as fast
1349 		 * a 32-bit 66MHz PCI Bus.
1350 		 */
1351 		sc->sc_flags |= WM_F_CSA;
1352 		sc->sc_bus_speed = 66;
1353 		aprint_verbose_dev(sc->sc_dev,
1354 		    "Communication Streaming Architecture\n");
1355 		if (sc->sc_type == WM_T_82547) {
1356 			callout_init(&sc->sc_txfifo_ch, 0);
1357 			callout_setfunc(&sc->sc_txfifo_ch,
1358 					wm_82547_txfifo_stall, sc);
1359 			aprint_verbose_dev(sc->sc_dev,
1360 			    "using 82547 Tx FIFO stall work-around\n");
1361 		}
1362 	} else if (sc->sc_type >= WM_T_82571) {
1363 		sc->sc_flags |= WM_F_PCIE;
1364 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1365 		    && (sc->sc_type != WM_T_ICH10)
1366 		    && (sc->sc_type != WM_T_PCH)
1367 		    && (sc->sc_type != WM_T_PCH2)
1368 		    && (sc->sc_type != WM_T_PCH_LPT)) {
1369 			sc->sc_flags |= WM_F_EEPROM_SEMAPHORE;
1370 			/* ICH* and PCH* have no PCIe capability registers */
1371 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1372 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1373 				NULL) == 0)
1374 				aprint_error_dev(sc->sc_dev,
1375 				    "unable to find PCIe capability\n");
1376 		}
1377 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1378 	} else {
1379 		reg = CSR_READ(sc, WMREG_STATUS);
1380 		if (reg & STATUS_BUS64)
1381 			sc->sc_flags |= WM_F_BUS64;
1382 		if ((reg & STATUS_PCIX_MODE) != 0) {
1383 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1384 
1385 			sc->sc_flags |= WM_F_PCIX;
1386 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1387 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1388 				aprint_error_dev(sc->sc_dev,
1389 				    "unable to find PCIX capability\n");
1390 			else if (sc->sc_type != WM_T_82545_3 &&
1391 				 sc->sc_type != WM_T_82546_3) {
1392 				/*
1393 				 * Work around a problem caused by the BIOS
1394 				 * setting the max memory read byte count
1395 				 * incorrectly.
1396 				 */
1397 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1398 				    sc->sc_pcixe_capoff + PCIX_CMD);
1399 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1400 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1401 
1402 				bytecnt =
1403 				    (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1404 				    PCIX_CMD_BYTECNT_SHIFT;
1405 				maxb =
1406 				    (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1407 				    PCIX_STATUS_MAXB_SHIFT;
1408 				if (bytecnt > maxb) {
1409 					aprint_verbose_dev(sc->sc_dev,
1410 					    "resetting PCI-X MMRBC: %d -> %d\n",
1411 					    512 << bytecnt, 512 << maxb);
1412 					pcix_cmd = (pcix_cmd &
1413 					    ~PCIX_CMD_BYTECNT_MASK) |
1414 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1415 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1416 					    sc->sc_pcixe_capoff + PCIX_CMD,
1417 					    pcix_cmd);
1418 				}
1419 			}
1420 		}
1421 		/*
1422 		 * The quad port adapter is special; it has a PCIX-PCIX
1423 		 * bridge on the board, and can run the secondary bus at
1424 		 * a higher speed.
1425 		 */
1426 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1427 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1428 								      : 66;
1429 		} else if (sc->sc_flags & WM_F_PCIX) {
1430 			switch (reg & STATUS_PCIXSPD_MASK) {
1431 			case STATUS_PCIXSPD_50_66:
1432 				sc->sc_bus_speed = 66;
1433 				break;
1434 			case STATUS_PCIXSPD_66_100:
1435 				sc->sc_bus_speed = 100;
1436 				break;
1437 			case STATUS_PCIXSPD_100_133:
1438 				sc->sc_bus_speed = 133;
1439 				break;
1440 			default:
1441 				aprint_error_dev(sc->sc_dev,
1442 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1443 				    reg & STATUS_PCIXSPD_MASK);
1444 				sc->sc_bus_speed = 66;
1445 				break;
1446 			}
1447 		} else
1448 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1449 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1450 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1451 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1452 	}
1453 
1454 	/*
1455 	 * Allocate the control data structures, and create and load the
1456 	 * DMA map for it.
1457 	 *
1458 	 * NOTE: All Tx descriptors must be in the same 4G segment of
1459 	 * memory.  So must Rx descriptors.  We simplify by allocating
1460 	 * both sets within the same 4G segment.
1461 	 */
1462 	WM_NTXDESC(sc) = sc->sc_type < WM_T_82544 ?
1463 	    WM_NTXDESC_82542 : WM_NTXDESC_82544;
1464 	sc->sc_cd_size = sc->sc_type < WM_T_82544 ?
1465 	    sizeof(struct wm_control_data_82542) :
1466 	    sizeof(struct wm_control_data_82544);
1467 	if ((error = bus_dmamem_alloc(sc->sc_dmat, sc->sc_cd_size, PAGE_SIZE,
1468 		    (bus_size_t) 0x100000000ULL, &sc->sc_cd_seg, 1,
1469 		    &sc->sc_cd_rseg, 0)) != 0) {
1470 		aprint_error_dev(sc->sc_dev,
1471 		    "unable to allocate control data, error = %d\n",
1472 		    error);
1473 		goto fail_0;
1474 	}
1475 
1476 	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cd_seg,
1477 		    sc->sc_cd_rseg, sc->sc_cd_size,
1478 		    (void **)&sc->sc_control_data, BUS_DMA_COHERENT)) != 0) {
1479 		aprint_error_dev(sc->sc_dev,
1480 		    "unable to map control data, error = %d\n", error);
1481 		goto fail_1;
1482 	}
1483 
1484 	if ((error = bus_dmamap_create(sc->sc_dmat, sc->sc_cd_size, 1,
1485 		    sc->sc_cd_size, 0, 0, &sc->sc_cddmamap)) != 0) {
1486 		aprint_error_dev(sc->sc_dev,
1487 		    "unable to create control data DMA map, error = %d\n",
1488 		    error);
1489 		goto fail_2;
1490 	}
1491 
1492 	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
1493 		    sc->sc_control_data, sc->sc_cd_size, NULL, 0)) != 0) {
1494 		aprint_error_dev(sc->sc_dev,
1495 		    "unable to load control data DMA map, error = %d\n",
1496 		    error);
1497 		goto fail_3;
1498 	}
1499 
1500 	/*
1501 	 * Create the transmit buffer DMA maps.
1502 	 */
1503 	WM_TXQUEUELEN(sc) =
1504 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
1505 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
1506 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
1507 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
1508 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
1509 			    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
1510 			aprint_error_dev(sc->sc_dev,
1511 			    "unable to create Tx DMA map %d, error = %d\n",
1512 			    i, error);
1513 			goto fail_4;
1514 		}
1515 	}
1516 
1517 	/*
1518 	 * Create the receive buffer DMA maps.
1519 	 */
1520 	for (i = 0; i < WM_NRXDESC; i++) {
1521 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
1522 			    MCLBYTES, 0, 0,
1523 			    &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
1524 			aprint_error_dev(sc->sc_dev,
1525 			    "unable to create Rx DMA map %d error = %d\n",
1526 			    i, error);
1527 			goto fail_5;
1528 		}
1529 		sc->sc_rxsoft[i].rxs_mbuf = NULL;
1530 	}
1531 
1532 	/* clear interesting stat counters */
1533 	CSR_READ(sc, WMREG_COLC);
1534 	CSR_READ(sc, WMREG_RXERRC);
1535 
1536 	/* get PHY control from SMBus to PCIe */
1537 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1538 	    || (sc->sc_type == WM_T_PCH_LPT))
1539 		wm_smbustopci(sc);
1540 
1541 	/*
1542 	 * Reset the chip to a known state.
1543 	 */
1544 	wm_reset(sc);
1545 
1546 	/*
1547 	 * Get some information about the EEPROM.
1548 	 */
1549 	switch (sc->sc_type) {
1550 	case WM_T_82542_2_0:
1551 	case WM_T_82542_2_1:
1552 	case WM_T_82543:
1553 	case WM_T_82544:
1554 		/* Microwire */
1555 		sc->sc_ee_addrbits = 6;
1556 		break;
1557 	case WM_T_82540:
1558 	case WM_T_82545:
1559 	case WM_T_82545_3:
1560 	case WM_T_82546:
1561 	case WM_T_82546_3:
1562 		/* Microwire */
1563 		reg = CSR_READ(sc, WMREG_EECD);
1564 		if (reg & EECD_EE_SIZE)
1565 			sc->sc_ee_addrbits = 8;
1566 		else
1567 			sc->sc_ee_addrbits = 6;
1568 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1569 		break;
1570 	case WM_T_82541:
1571 	case WM_T_82541_2:
1572 	case WM_T_82547:
1573 	case WM_T_82547_2:
1574 		reg = CSR_READ(sc, WMREG_EECD);
1575 		if (reg & EECD_EE_TYPE) {
1576 			/* SPI */
1577 			wm_set_spiaddrbits(sc);
1578 		} else
1579 			/* Microwire */
1580 			sc->sc_ee_addrbits = (reg & EECD_EE_ABITS) ? 8 : 6;
1581 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1582 		break;
1583 	case WM_T_82571:
1584 	case WM_T_82572:
1585 		/* SPI */
1586 		wm_set_spiaddrbits(sc);
1587 		sc->sc_flags |= WM_F_EEPROM_HANDSHAKE;
1588 		break;
1589 	case WM_T_82573:
1590 	case WM_T_82574:
1591 	case WM_T_82583:
1592 		if (wm_is_onboard_nvm_eeprom(sc) == 0)
1593 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1594 		else {
1595 			/* SPI */
1596 			wm_set_spiaddrbits(sc);
1597 		}
1598 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1599 		break;
1600 	case WM_T_82575:
1601 	case WM_T_82576:
1602 	case WM_T_82580:
1603 	case WM_T_82580ER:
1604 	case WM_T_I350:
1605 	case WM_T_I354: /* XXXX ok? */
1606 	case WM_T_80003:
1607 		/* SPI */
1608 		wm_set_spiaddrbits(sc);
1609 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1610 		break;
1611 	case WM_T_ICH8:
1612 	case WM_T_ICH9:
1613 	case WM_T_ICH10:
1614 	case WM_T_PCH:
1615 	case WM_T_PCH2:
1616 	case WM_T_PCH_LPT:
1617 		/* FLASH */
1618 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_SWFWHW_SYNC;
1619 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1620 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1621 		    &sc->sc_flasht, &sc->sc_flashh, NULL, NULL)) {
1622 			aprint_error_dev(sc->sc_dev,
1623 			    "can't map FLASH registers\n");
1624 			return;
1625 		}
1626 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1627 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1628 						ICH_FLASH_SECTOR_SIZE;
1629 		sc->sc_ich8_flash_bank_size =
1630 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1631 		sc->sc_ich8_flash_bank_size -=
1632 		    (reg & ICH_GFPREG_BASE_MASK);
1633 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1634 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1635 		break;
1636 	case WM_T_I210:
1637 	case WM_T_I211:
1638 		sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1639 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_SWFW_SYNC;
1640 		break;
1641 	default:
1642 		break;
1643 	}
1644 
1645 	/*
1646 	 * Defer printing the EEPROM type until after verifying the checksum
1647 	 * This allows the EEPROM type to be printed correctly in the case
1648 	 * that no EEPROM is attached.
1649 	 */
1650 	/*
1651 	 * Validate the EEPROM checksum. If the checksum fails, flag
1652 	 * this for later, so we can fail future reads from the EEPROM.
1653 	 */
1654 	if (wm_validate_eeprom_checksum(sc)) {
1655 		/*
1656 		 * Read twice again because some PCI-e parts fail the
1657 		 * first check due to the link being in sleep state.
1658 		 */
1659 		if (wm_validate_eeprom_checksum(sc))
1660 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1661 	}
1662 
1663 	/* Set device properties (macflags) */
1664 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1665 
1666 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1667 		aprint_verbose_dev(sc->sc_dev, "No EEPROM\n");
1668 	else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW) {
1669 		aprint_verbose_dev(sc->sc_dev, "FLASH(HW)\n");
1670 	} else if (sc->sc_flags & WM_F_EEPROM_FLASH) {
1671 		aprint_verbose_dev(sc->sc_dev, "FLASH\n");
1672 	} else {
1673 		if (sc->sc_flags & WM_F_EEPROM_SPI)
1674 			eetype = "SPI";
1675 		else
1676 			eetype = "MicroWire";
1677 		aprint_verbose_dev(sc->sc_dev,
1678 		    "%u word (%d address bits) %s EEPROM\n",
1679 		    1U << sc->sc_ee_addrbits,
1680 		    sc->sc_ee_addrbits, eetype);
1681 	}
1682 
1683 	switch (sc->sc_type) {
1684 	case WM_T_82571:
1685 	case WM_T_82572:
1686 	case WM_T_82573:
1687 	case WM_T_82574:
1688 	case WM_T_82583:
1689 	case WM_T_80003:
1690 	case WM_T_ICH8:
1691 	case WM_T_ICH9:
1692 	case WM_T_ICH10:
1693 	case WM_T_PCH:
1694 	case WM_T_PCH2:
1695 	case WM_T_PCH_LPT:
1696 		if (wm_check_mng_mode(sc) != 0)
1697 			wm_get_hw_control(sc);
1698 		break;
1699 	default:
1700 		break;
1701 	}
1702 	wm_get_wakeup(sc);
1703 	/*
1704 	 * Read the Ethernet address from the EEPROM, if not first found
1705 	 * in device properties.
1706 	 */
1707 	ea = prop_dictionary_get(dict, "mac-address");
1708 	if (ea != NULL) {
1709 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
1710 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
1711 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
1712 	} else {
1713 		if (wm_read_mac_addr(sc, enaddr) != 0) {
1714 			aprint_error_dev(sc->sc_dev,
1715 			    "unable to read Ethernet address\n");
1716 			return;
1717 		}
1718 	}
1719 
1720 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
1721 	    ether_sprintf(enaddr));
1722 
1723 	/*
1724 	 * Read the config info from the EEPROM, and set up various
1725 	 * bits in the control registers based on their contents.
1726 	 */
1727 	pn = prop_dictionary_get(dict, "i82543-cfg1");
1728 	if (pn != NULL) {
1729 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1730 		cfg1 = (uint16_t) prop_number_integer_value(pn);
1731 	} else {
1732 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG1, 1, &cfg1)) {
1733 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
1734 			return;
1735 		}
1736 	}
1737 
1738 	pn = prop_dictionary_get(dict, "i82543-cfg2");
1739 	if (pn != NULL) {
1740 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1741 		cfg2 = (uint16_t) prop_number_integer_value(pn);
1742 	} else {
1743 		if (wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &cfg2)) {
1744 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
1745 			return;
1746 		}
1747 	}
1748 
1749 	/* check for WM_F_WOL */
1750 	switch (sc->sc_type) {
1751 	case WM_T_82542_2_0:
1752 	case WM_T_82542_2_1:
1753 	case WM_T_82543:
1754 		/* dummy? */
1755 		eeprom_data = 0;
1756 		apme_mask = EEPROM_CFG3_APME;
1757 		break;
1758 	case WM_T_82544:
1759 		apme_mask = EEPROM_CFG2_82544_APM_EN;
1760 		eeprom_data = cfg2;
1761 		break;
1762 	case WM_T_82546:
1763 	case WM_T_82546_3:
1764 	case WM_T_82571:
1765 	case WM_T_82572:
1766 	case WM_T_82573:
1767 	case WM_T_82574:
1768 	case WM_T_82583:
1769 	case WM_T_80003:
1770 	default:
1771 		apme_mask = EEPROM_CFG3_APME;
1772 		wm_read_eeprom(sc, (sc->sc_funcid == 1) ? EEPROM_OFF_CFG3_PORTB
1773 		    : EEPROM_OFF_CFG3_PORTA, 1, &eeprom_data);
1774 		break;
1775 	case WM_T_82575:
1776 	case WM_T_82576:
1777 	case WM_T_82580:
1778 	case WM_T_82580ER:
1779 	case WM_T_I350:
1780 	case WM_T_I354: /* XXX ok? */
1781 	case WM_T_ICH8:
1782 	case WM_T_ICH9:
1783 	case WM_T_ICH10:
1784 	case WM_T_PCH:
1785 	case WM_T_PCH2:
1786 	case WM_T_PCH_LPT:
1787 		/* XXX The funcid should be checked on some devices */
1788 		apme_mask = WUC_APME;
1789 		eeprom_data = CSR_READ(sc, WMREG_WUC);
1790 		break;
1791 	}
1792 
1793 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
1794 	if ((eeprom_data & apme_mask) != 0)
1795 		sc->sc_flags |= WM_F_WOL;
1796 #ifdef WM_DEBUG
1797 	if ((sc->sc_flags & WM_F_WOL) != 0)
1798 		printf("WOL\n");
1799 #endif
1800 
1801 	/*
1802 	 * XXX need special handling for some multiple port cards
1803 	 * to disable a paticular port.
1804 	 */
1805 
1806 	if (sc->sc_type >= WM_T_82544) {
1807 		pn = prop_dictionary_get(dict, "i82543-swdpin");
1808 		if (pn != NULL) {
1809 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
1810 			swdpin = (uint16_t) prop_number_integer_value(pn);
1811 		} else {
1812 			if (wm_read_eeprom(sc, EEPROM_OFF_SWDPIN, 1, &swdpin)) {
1813 				aprint_error_dev(sc->sc_dev,
1814 				    "unable to read SWDPIN\n");
1815 				return;
1816 			}
1817 		}
1818 	}
1819 
1820 	if (cfg1 & EEPROM_CFG1_ILOS)
1821 		sc->sc_ctrl |= CTRL_ILOS;
1822 	if (sc->sc_type >= WM_T_82544) {
1823 		sc->sc_ctrl |=
1824 		    ((swdpin >> EEPROM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
1825 		    CTRL_SWDPIO_SHIFT;
1826 		sc->sc_ctrl |=
1827 		    ((swdpin >> EEPROM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
1828 		    CTRL_SWDPINS_SHIFT;
1829 	} else {
1830 		sc->sc_ctrl |=
1831 		    ((cfg1 >> EEPROM_CFG1_SWDPIO_SHIFT) & 0xf) <<
1832 		    CTRL_SWDPIO_SHIFT;
1833 	}
1834 
1835 #if 0
1836 	if (sc->sc_type >= WM_T_82544) {
1837 		if (cfg1 & EEPROM_CFG1_IPS0)
1838 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
1839 		if (cfg1 & EEPROM_CFG1_IPS1)
1840 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
1841 		sc->sc_ctrl_ext |=
1842 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
1843 		    CTRL_EXT_SWDPIO_SHIFT;
1844 		sc->sc_ctrl_ext |=
1845 		    ((swdpin >> (EEPROM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
1846 		    CTRL_EXT_SWDPINS_SHIFT;
1847 	} else {
1848 		sc->sc_ctrl_ext |=
1849 		    ((cfg2 >> EEPROM_CFG2_SWDPIO_SHIFT) & 0xf) <<
1850 		    CTRL_EXT_SWDPIO_SHIFT;
1851 	}
1852 #endif
1853 
1854 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
1855 #if 0
1856 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
1857 #endif
1858 
1859 	/*
1860 	 * Set up some register offsets that are different between
1861 	 * the i82542 and the i82543 and later chips.
1862 	 */
1863 	if (sc->sc_type < WM_T_82543) {
1864 		sc->sc_rdt_reg = WMREG_OLD_RDT0;
1865 		sc->sc_tdt_reg = WMREG_OLD_TDT;
1866 	} else {
1867 		sc->sc_rdt_reg = WMREG_RDT;
1868 		sc->sc_tdt_reg = WMREG_TDT;
1869 	}
1870 
1871 	if (sc->sc_type == WM_T_PCH) {
1872 		uint16_t val;
1873 
1874 		/* Save the NVM K1 bit setting */
1875 		wm_read_eeprom(sc, EEPROM_OFF_K1_CONFIG, 1, &val);
1876 
1877 		if ((val & EEPROM_K1_CONFIG_ENABLE) != 0)
1878 			sc->sc_nvm_k1_enabled = 1;
1879 		else
1880 			sc->sc_nvm_k1_enabled = 0;
1881 	}
1882 
1883 	/*
1884 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
1885 	 * media structures accordingly.
1886 	 */
1887 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
1888 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
1889 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
1890 	    || sc->sc_type == WM_T_82573
1891 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
1892 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
1893 		wm_gmii_mediainit(sc, wmp->wmp_product);
1894 	} else if (sc->sc_type < WM_T_82543 ||
1895 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
1896 		if (wmp->wmp_flags & WMP_F_1000T)
1897 			aprint_error_dev(sc->sc_dev,
1898 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
1899 		wm_tbi_mediainit(sc);
1900 	} else {
1901 		switch (sc->sc_type) {
1902 		case WM_T_82575:
1903 		case WM_T_82576:
1904 		case WM_T_82580:
1905 		case WM_T_82580ER:
1906 		case WM_T_I350:
1907 		case WM_T_I354:
1908 		case WM_T_I210:
1909 		case WM_T_I211:
1910 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
1911 			switch (reg & CTRL_EXT_LINK_MODE_MASK) {
1912 			case CTRL_EXT_LINK_MODE_1000KX:
1913 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
1914 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1915 				    reg | CTRL_EXT_I2C_ENA);
1916 				panic("not supported yet\n");
1917 				break;
1918 			case CTRL_EXT_LINK_MODE_SGMII:
1919 				if (wm_sgmii_uses_mdio(sc)) {
1920 					aprint_verbose_dev(sc->sc_dev,
1921 					    "SGMII(MDIO)\n");
1922 					sc->sc_flags |= WM_F_SGMII;
1923 					wm_gmii_mediainit(sc,
1924 					    wmp->wmp_product);
1925 					break;
1926 				}
1927 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
1928 				/*FALLTHROUGH*/
1929 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
1930 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
1931 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1932 				    reg | CTRL_EXT_I2C_ENA);
1933 				panic("not supported yet\n");
1934 				break;
1935 			case CTRL_EXT_LINK_MODE_GMII:
1936 			default:
1937 				CSR_WRITE(sc, WMREG_CTRL_EXT,
1938 				    reg & ~CTRL_EXT_I2C_ENA);
1939 				wm_gmii_mediainit(sc, wmp->wmp_product);
1940 				break;
1941 			}
1942 			break;
1943 		default:
1944 			if (wmp->wmp_flags & WMP_F_1000X)
1945 				aprint_error_dev(sc->sc_dev,
1946 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
1947 			wm_gmii_mediainit(sc, wmp->wmp_product);
1948 		}
1949 	}
1950 
1951 	ifp = &sc->sc_ethercom.ec_if;
1952 	xname = device_xname(sc->sc_dev);
1953 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
1954 	ifp->if_softc = sc;
1955 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1956 	ifp->if_ioctl = wm_ioctl;
1957 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
1958 		ifp->if_start = wm_nq_start;
1959 	else
1960 		ifp->if_start = wm_start;
1961 	ifp->if_watchdog = wm_watchdog;
1962 	ifp->if_init = wm_init;
1963 	ifp->if_stop = wm_stop;
1964 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
1965 	IFQ_SET_READY(&ifp->if_snd);
1966 
1967 	/* Check for jumbo frame */
1968 	switch (sc->sc_type) {
1969 	case WM_T_82573:
1970 		/* XXX limited to 9234 if ASPM is disabled */
1971 		wm_read_eeprom(sc, EEPROM_INIT_3GIO_3, 1, &io3);
1972 		if ((io3 & EEPROM_3GIO_3_ASPM_MASK) != 0)
1973 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1974 		break;
1975 	case WM_T_82571:
1976 	case WM_T_82572:
1977 	case WM_T_82574:
1978 	case WM_T_82575:
1979 	case WM_T_82576:
1980 	case WM_T_82580:
1981 	case WM_T_82580ER:
1982 	case WM_T_I350:
1983 	case WM_T_I354: /* XXXX ok? */
1984 	case WM_T_I210:
1985 	case WM_T_I211:
1986 	case WM_T_80003:
1987 	case WM_T_ICH9:
1988 	case WM_T_ICH10:
1989 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
1990 	case WM_T_PCH_LPT:
1991 		/* XXX limited to 9234 */
1992 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1993 		break;
1994 	case WM_T_PCH:
1995 		/* XXX limited to 4096 */
1996 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
1997 		break;
1998 	case WM_T_82542_2_0:
1999 	case WM_T_82542_2_1:
2000 	case WM_T_82583:
2001 	case WM_T_ICH8:
2002 		/* No support for jumbo frame */
2003 		break;
2004 	default:
2005 		/* ETHER_MAX_LEN_JUMBO */
2006 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2007 		break;
2008 	}
2009 
2010 	/*
2011 	 * If we're a i82543 or greater, we can support VLANs.
2012 	 */
2013 	if (sc->sc_type >= WM_T_82543)
2014 		sc->sc_ethercom.ec_capabilities |=
2015 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2016 
2017 	/*
2018 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
2019 	 * on i82543 and later.
2020 	 */
2021 	if (sc->sc_type >= WM_T_82543) {
2022 		ifp->if_capabilities |=
2023 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2024 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2025 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2026 		    IFCAP_CSUM_TCPv6_Tx |
2027 		    IFCAP_CSUM_UDPv6_Tx;
2028 	}
2029 
2030 	/*
2031 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2032 	 *
2033 	 *	82541GI (8086:1076) ... no
2034 	 *	82572EI (8086:10b9) ... yes
2035 	 */
2036 	if (sc->sc_type >= WM_T_82571) {
2037 		ifp->if_capabilities |=
2038 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2039 	}
2040 
2041 	/*
2042 	 * If we're a i82544 or greater (except i82547), we can do
2043 	 * TCP segmentation offload.
2044 	 */
2045 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2046 		ifp->if_capabilities |= IFCAP_TSOv4;
2047 	}
2048 
2049 	if (sc->sc_type >= WM_T_82571) {
2050 		ifp->if_capabilities |= IFCAP_TSOv6;
2051 	}
2052 
2053 	/*
2054 	 * Attach the interface.
2055 	 */
2056 	if_attach(ifp);
2057 	ether_ifattach(ifp, enaddr);
2058 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2059 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET, 0);
2060 
2061 #ifdef WM_EVENT_COUNTERS
2062 	/* Attach event counters. */
2063 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2064 	    NULL, xname, "txsstall");
2065 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2066 	    NULL, xname, "txdstall");
2067 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2068 	    NULL, xname, "txfifo_stall");
2069 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2070 	    NULL, xname, "txdw");
2071 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2072 	    NULL, xname, "txqe");
2073 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2074 	    NULL, xname, "rxintr");
2075 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2076 	    NULL, xname, "linkintr");
2077 
2078 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2079 	    NULL, xname, "rxipsum");
2080 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2081 	    NULL, xname, "rxtusum");
2082 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2083 	    NULL, xname, "txipsum");
2084 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2085 	    NULL, xname, "txtusum");
2086 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2087 	    NULL, xname, "txtusum6");
2088 
2089 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2090 	    NULL, xname, "txtso");
2091 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2092 	    NULL, xname, "txtso6");
2093 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2094 	    NULL, xname, "txtsopain");
2095 
2096 	for (i = 0; i < WM_NTXSEGS; i++) {
2097 		sprintf(wm_txseg_evcnt_names[i], "txseg%d", i);
2098 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2099 		    NULL, xname, wm_txseg_evcnt_names[i]);
2100 	}
2101 
2102 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2103 	    NULL, xname, "txdrop");
2104 
2105 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2106 	    NULL, xname, "tu");
2107 
2108 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2109 	    NULL, xname, "tx_xoff");
2110 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2111 	    NULL, xname, "tx_xon");
2112 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2113 	    NULL, xname, "rx_xoff");
2114 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2115 	    NULL, xname, "rx_xon");
2116 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2117 	    NULL, xname, "rx_macctl");
2118 #endif /* WM_EVENT_COUNTERS */
2119 
2120 	if (pmf_device_register(self, wm_suspend, wm_resume))
2121 		pmf_class_network_register(self, ifp);
2122 	else
2123 		aprint_error_dev(self, "couldn't establish power handler\n");
2124 
2125 	return;
2126 
2127 	/*
2128 	 * Free any resources we've allocated during the failed attach
2129 	 * attempt.  Do this in reverse order and fall through.
2130 	 */
2131  fail_5:
2132 	for (i = 0; i < WM_NRXDESC; i++) {
2133 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2134 			bus_dmamap_destroy(sc->sc_dmat,
2135 			    sc->sc_rxsoft[i].rxs_dmamap);
2136 	}
2137  fail_4:
2138 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2139 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2140 			bus_dmamap_destroy(sc->sc_dmat,
2141 			    sc->sc_txsoft[i].txs_dmamap);
2142 	}
2143 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2144  fail_3:
2145 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2146  fail_2:
2147 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2148 	    sc->sc_cd_size);
2149  fail_1:
2150 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2151  fail_0:
2152 	return;
2153 }
2154 
2155 static int
2156 wm_detach(device_t self, int flags __unused)
2157 {
2158 	struct wm_softc *sc = device_private(self);
2159 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2160 	int i, s;
2161 
2162 	s = splnet();
2163 	/* Stop the interface. Callouts are stopped in it. */
2164 	wm_stop(ifp, 1);
2165 	splx(s);
2166 
2167 	pmf_device_deregister(self);
2168 
2169 	/* Tell the firmware about the release */
2170 	wm_release_manageability(sc);
2171 	wm_release_hw_control(sc);
2172 
2173 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2174 
2175 	/* Delete all remaining media. */
2176 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2177 
2178 	ether_ifdetach(ifp);
2179 	if_detach(ifp);
2180 
2181 
2182 	/* Unload RX dmamaps and free mbufs */
2183 	wm_rxdrain(sc);
2184 
2185 	/* Free dmamap. It's the same as the end of the wm_attach() function */
2186 	for (i = 0; i < WM_NRXDESC; i++) {
2187 		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
2188 			bus_dmamap_destroy(sc->sc_dmat,
2189 			    sc->sc_rxsoft[i].rxs_dmamap);
2190 	}
2191 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
2192 		if (sc->sc_txsoft[i].txs_dmamap != NULL)
2193 			bus_dmamap_destroy(sc->sc_dmat,
2194 			    sc->sc_txsoft[i].txs_dmamap);
2195 	}
2196 	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
2197 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
2198 	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
2199 	    sc->sc_cd_size);
2200 	bus_dmamem_free(sc->sc_dmat, &sc->sc_cd_seg, sc->sc_cd_rseg);
2201 
2202 	/* Disestablish the interrupt handler */
2203 	if (sc->sc_ih != NULL) {
2204 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
2205 		sc->sc_ih = NULL;
2206 	}
2207 
2208 	/* Unmap the registers */
2209 	if (sc->sc_ss) {
2210 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2211 		sc->sc_ss = 0;
2212 	}
2213 
2214 	if (sc->sc_ios) {
2215 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2216 		sc->sc_ios = 0;
2217 	}
2218 
2219 	return 0;
2220 }
2221 
2222 /*
2223  * wm_tx_offload:
2224  *
2225  *	Set up TCP/IP checksumming parameters for the
2226  *	specified packet.
2227  */
2228 static int
2229 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
2230     uint8_t *fieldsp)
2231 {
2232 	struct mbuf *m0 = txs->txs_mbuf;
2233 	struct livengood_tcpip_ctxdesc *t;
2234 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
2235 	uint32_t ipcse;
2236 	struct ether_header *eh;
2237 	int offset, iphl;
2238 	uint8_t fields;
2239 
2240 	/*
2241 	 * XXX It would be nice if the mbuf pkthdr had offset
2242 	 * fields for the protocol headers.
2243 	 */
2244 
2245 	eh = mtod(m0, struct ether_header *);
2246 	switch (htons(eh->ether_type)) {
2247 	case ETHERTYPE_IP:
2248 	case ETHERTYPE_IPV6:
2249 		offset = ETHER_HDR_LEN;
2250 		break;
2251 
2252 	case ETHERTYPE_VLAN:
2253 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2254 		break;
2255 
2256 	default:
2257 		/*
2258 		 * Don't support this protocol or encapsulation.
2259 		 */
2260 		*fieldsp = 0;
2261 		*cmdp = 0;
2262 		return 0;
2263 	}
2264 
2265 	if ((m0->m_pkthdr.csum_flags &
2266 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4)) != 0) {
2267 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2268 	} else {
2269 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2270 	}
2271 	ipcse = offset + iphl - 1;
2272 
2273 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
2274 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
2275 	seg = 0;
2276 	fields = 0;
2277 
2278 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2279 		int hlen = offset + iphl;
2280 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2281 
2282 		if (__predict_false(m0->m_len <
2283 				    (hlen + sizeof(struct tcphdr)))) {
2284 			/*
2285 			 * TCP/IP headers are not in the first mbuf; we need
2286 			 * to do this the slow and painful way.  Let's just
2287 			 * hope this doesn't happen very often.
2288 			 */
2289 			struct tcphdr th;
2290 
2291 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2292 
2293 			m_copydata(m0, hlen, sizeof(th), &th);
2294 			if (v4) {
2295 				struct ip ip;
2296 
2297 				m_copydata(m0, offset, sizeof(ip), &ip);
2298 				ip.ip_len = 0;
2299 				m_copyback(m0,
2300 				    offset + offsetof(struct ip, ip_len),
2301 				    sizeof(ip.ip_len), &ip.ip_len);
2302 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2303 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2304 			} else {
2305 				struct ip6_hdr ip6;
2306 
2307 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2308 				ip6.ip6_plen = 0;
2309 				m_copyback(m0,
2310 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2311 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2312 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2313 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2314 			}
2315 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2316 			    sizeof(th.th_sum), &th.th_sum);
2317 
2318 			hlen += th.th_off << 2;
2319 		} else {
2320 			/*
2321 			 * TCP/IP headers are in the first mbuf; we can do
2322 			 * this the easy way.
2323 			 */
2324 			struct tcphdr *th;
2325 
2326 			if (v4) {
2327 				struct ip *ip =
2328 				    (void *)(mtod(m0, char *) + offset);
2329 				th = (void *)(mtod(m0, char *) + hlen);
2330 
2331 				ip->ip_len = 0;
2332 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2333 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2334 			} else {
2335 				struct ip6_hdr *ip6 =
2336 				    (void *)(mtod(m0, char *) + offset);
2337 				th = (void *)(mtod(m0, char *) + hlen);
2338 
2339 				ip6->ip6_plen = 0;
2340 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2341 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2342 			}
2343 			hlen += th->th_off << 2;
2344 		}
2345 
2346 		if (v4) {
2347 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2348 			cmdlen |= WTX_TCPIP_CMD_IP;
2349 		} else {
2350 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2351 			ipcse = 0;
2352 		}
2353 		cmd |= WTX_TCPIP_CMD_TSE;
2354 		cmdlen |= WTX_TCPIP_CMD_TSE |
2355 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
2356 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
2357 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
2358 	}
2359 
2360 	/*
2361 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
2362 	 * offload feature, if we load the context descriptor, we
2363 	 * MUST provide valid values for IPCSS and TUCSS fields.
2364 	 */
2365 
2366 	ipcs = WTX_TCPIP_IPCSS(offset) |
2367 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
2368 	    WTX_TCPIP_IPCSE(ipcse);
2369 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4|M_CSUM_TSOv4)) {
2370 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
2371 		fields |= WTX_IXSM;
2372 	}
2373 
2374 	offset += iphl;
2375 
2376 	if (m0->m_pkthdr.csum_flags &
2377 	    (M_CSUM_TCPv4|M_CSUM_UDPv4|M_CSUM_TSOv4)) {
2378 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
2379 		fields |= WTX_TXSM;
2380 		tucs = WTX_TCPIP_TUCSS(offset) |
2381 		    WTX_TCPIP_TUCSO(offset +
2382 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
2383 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2384 	} else if ((m0->m_pkthdr.csum_flags &
2385 	    (M_CSUM_TCPv6|M_CSUM_UDPv6|M_CSUM_TSOv6)) != 0) {
2386 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
2387 		fields |= WTX_TXSM;
2388 		tucs = WTX_TCPIP_TUCSS(offset) |
2389 		    WTX_TCPIP_TUCSO(offset +
2390 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
2391 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2392 	} else {
2393 		/* Just initialize it to a valid TCP context. */
2394 		tucs = WTX_TCPIP_TUCSS(offset) |
2395 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
2396 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
2397 	}
2398 
2399 	/* Fill in the context descriptor. */
2400 	t = (struct livengood_tcpip_ctxdesc *)
2401 	    &sc->sc_txdescs[sc->sc_txnext];
2402 	t->tcpip_ipcs = htole32(ipcs);
2403 	t->tcpip_tucs = htole32(tucs);
2404 	t->tcpip_cmdlen = htole32(cmdlen);
2405 	t->tcpip_seg = htole32(seg);
2406 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
2407 
2408 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
2409 	txs->txs_ndesc++;
2410 
2411 	*cmdp = cmd;
2412 	*fieldsp = fields;
2413 
2414 	return 0;
2415 }
2416 
2417 static void
2418 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
2419 {
2420 	struct mbuf *m;
2421 	int i;
2422 
2423 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
2424 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
2425 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
2426 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
2427 		    m->m_data, m->m_len, m->m_flags);
2428 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
2429 	    i, i == 1 ? "" : "s");
2430 }
2431 
2432 /*
2433  * wm_82547_txfifo_stall:
2434  *
2435  *	Callout used to wait for the 82547 Tx FIFO to drain,
2436  *	reset the FIFO pointers, and restart packet transmission.
2437  */
2438 static void
2439 wm_82547_txfifo_stall(void *arg)
2440 {
2441 	struct wm_softc *sc = arg;
2442 	int s;
2443 
2444 	s = splnet();
2445 
2446 	if (sc->sc_txfifo_stall) {
2447 		if (CSR_READ(sc, WMREG_TDT) == CSR_READ(sc, WMREG_TDH) &&
2448 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
2449 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
2450 			/*
2451 			 * Packets have drained.  Stop transmitter, reset
2452 			 * FIFO pointers, restart transmitter, and kick
2453 			 * the packet queue.
2454 			 */
2455 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
2456 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
2457 			CSR_WRITE(sc, WMREG_TDFT, sc->sc_txfifo_addr);
2458 			CSR_WRITE(sc, WMREG_TDFH, sc->sc_txfifo_addr);
2459 			CSR_WRITE(sc, WMREG_TDFTS, sc->sc_txfifo_addr);
2460 			CSR_WRITE(sc, WMREG_TDFHS, sc->sc_txfifo_addr);
2461 			CSR_WRITE(sc, WMREG_TCTL, tctl);
2462 			CSR_WRITE_FLUSH(sc);
2463 
2464 			sc->sc_txfifo_head = 0;
2465 			sc->sc_txfifo_stall = 0;
2466 			wm_start(&sc->sc_ethercom.ec_if);
2467 		} else {
2468 			/*
2469 			 * Still waiting for packets to drain; try again in
2470 			 * another tick.
2471 			 */
2472 			callout_schedule(&sc->sc_txfifo_ch, 1);
2473 		}
2474 	}
2475 
2476 	splx(s);
2477 }
2478 
2479 static void
2480 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
2481 {
2482 	uint32_t reg;
2483 
2484 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
2485 
2486 	if (on != 0)
2487 		reg |= EXTCNFCTR_GATE_PHY_CFG;
2488 	else
2489 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
2490 
2491 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
2492 }
2493 
2494 /*
2495  * wm_82547_txfifo_bugchk:
2496  *
2497  *	Check for bug condition in the 82547 Tx FIFO.  We need to
2498  *	prevent enqueueing a packet that would wrap around the end
2499  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
2500  *
2501  *	We do this by checking the amount of space before the end
2502  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
2503  *	the Tx FIFO, wait for all remaining packets to drain, reset
2504  *	the internal FIFO pointers to the beginning, and restart
2505  *	transmission on the interface.
2506  */
2507 #define	WM_FIFO_HDR		0x10
2508 #define	WM_82547_PAD_LEN	0x3e0
2509 static int
2510 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
2511 {
2512 	int space = sc->sc_txfifo_size - sc->sc_txfifo_head;
2513 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
2514 
2515 	/* Just return if already stalled. */
2516 	if (sc->sc_txfifo_stall)
2517 		return 1;
2518 
2519 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
2520 		/* Stall only occurs in half-duplex mode. */
2521 		goto send_packet;
2522 	}
2523 
2524 	if (len >= WM_82547_PAD_LEN + space) {
2525 		sc->sc_txfifo_stall = 1;
2526 		callout_schedule(&sc->sc_txfifo_ch, 1);
2527 		return 1;
2528 	}
2529 
2530  send_packet:
2531 	sc->sc_txfifo_head += len;
2532 	if (sc->sc_txfifo_head >= sc->sc_txfifo_size)
2533 		sc->sc_txfifo_head -= sc->sc_txfifo_size;
2534 
2535 	return 0;
2536 }
2537 
2538 /*
2539  * wm_start:		[ifnet interface function]
2540  *
2541  *	Start packet transmission on the interface.
2542  */
2543 static void
2544 wm_start(struct ifnet *ifp)
2545 {
2546 	struct wm_softc *sc = ifp->if_softc;
2547 	struct mbuf *m0;
2548 	struct m_tag *mtag;
2549 	struct wm_txsoft *txs;
2550 	bus_dmamap_t dmamap;
2551 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
2552 	bus_addr_t curaddr;
2553 	bus_size_t seglen, curlen;
2554 	uint32_t cksumcmd;
2555 	uint8_t cksumfields;
2556 
2557 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
2558 		return;
2559 
2560 	/*
2561 	 * Remember the previous number of free descriptors.
2562 	 */
2563 	ofree = sc->sc_txfree;
2564 
2565 	/*
2566 	 * Loop through the send queue, setting up transmit descriptors
2567 	 * until we drain the queue, or use up all available transmit
2568 	 * descriptors.
2569 	 */
2570 	for (;;) {
2571 		/* Grab a packet off the queue. */
2572 		IFQ_POLL(&ifp->if_snd, m0);
2573 		if (m0 == NULL)
2574 			break;
2575 
2576 		DPRINTF(WM_DEBUG_TX,
2577 		    ("%s: TX: have packet to transmit: %p\n",
2578 		    device_xname(sc->sc_dev), m0));
2579 
2580 		/* Get a work queue entry. */
2581 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
2582 			wm_txintr(sc);
2583 			if (sc->sc_txsfree == 0) {
2584 				DPRINTF(WM_DEBUG_TX,
2585 				    ("%s: TX: no free job descriptors\n",
2586 					device_xname(sc->sc_dev)));
2587 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
2588 				break;
2589 			}
2590 		}
2591 
2592 		txs = &sc->sc_txsoft[sc->sc_txsnext];
2593 		dmamap = txs->txs_dmamap;
2594 
2595 		use_tso = (m0->m_pkthdr.csum_flags &
2596 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
2597 
2598 		/*
2599 		 * So says the Linux driver:
2600 		 * The controller does a simple calculation to make sure
2601 		 * there is enough room in the FIFO before initiating the
2602 		 * DMA for each buffer.  The calc is:
2603 		 *	4 = ceil(buffer len / MSS)
2604 		 * To make sure we don't overrun the FIFO, adjust the max
2605 		 * buffer len if the MSS drops.
2606 		 */
2607 		dmamap->dm_maxsegsz =
2608 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
2609 		    ? m0->m_pkthdr.segsz << 2
2610 		    : WTX_MAX_LEN;
2611 
2612 		/*
2613 		 * Load the DMA map.  If this fails, the packet either
2614 		 * didn't fit in the allotted number of segments, or we
2615 		 * were short on resources.  For the too-many-segments
2616 		 * case, we simply report an error and drop the packet,
2617 		 * since we can't sanely copy a jumbo packet to a single
2618 		 * buffer.
2619 		 */
2620 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
2621 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
2622 		if (error) {
2623 			if (error == EFBIG) {
2624 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
2625 				log(LOG_ERR, "%s: Tx packet consumes too many "
2626 				    "DMA segments, dropping...\n",
2627 				    device_xname(sc->sc_dev));
2628 				IFQ_DEQUEUE(&ifp->if_snd, m0);
2629 				wm_dump_mbuf_chain(sc, m0);
2630 				m_freem(m0);
2631 				continue;
2632 			}
2633 			/*
2634 			 * Short on resources, just stop for now.
2635 			 */
2636 			DPRINTF(WM_DEBUG_TX,
2637 			    ("%s: TX: dmamap load failed: %d\n",
2638 			    device_xname(sc->sc_dev), error));
2639 			break;
2640 		}
2641 
2642 		segs_needed = dmamap->dm_nsegs;
2643 		if (use_tso) {
2644 			/* For sentinel descriptor; see below. */
2645 			segs_needed++;
2646 		}
2647 
2648 		/*
2649 		 * Ensure we have enough descriptors free to describe
2650 		 * the packet.  Note, we always reserve one descriptor
2651 		 * at the end of the ring due to the semantics of the
2652 		 * TDT register, plus one more in the event we need
2653 		 * to load offload context.
2654 		 */
2655 		if (segs_needed > sc->sc_txfree - 2) {
2656 			/*
2657 			 * Not enough free descriptors to transmit this
2658 			 * packet.  We haven't committed anything yet,
2659 			 * so just unload the DMA map, put the packet
2660 			 * pack on the queue, and punt.  Notify the upper
2661 			 * layer that there are no more slots left.
2662 			 */
2663 			DPRINTF(WM_DEBUG_TX,
2664 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
2665 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
2666 			    segs_needed, sc->sc_txfree - 1));
2667 			ifp->if_flags |= IFF_OACTIVE;
2668 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2669 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
2670 			break;
2671 		}
2672 
2673 		/*
2674 		 * Check for 82547 Tx FIFO bug.  We need to do this
2675 		 * once we know we can transmit the packet, since we
2676 		 * do some internal FIFO space accounting here.
2677 		 */
2678 		if (sc->sc_type == WM_T_82547 &&
2679 		    wm_82547_txfifo_bugchk(sc, m0)) {
2680 			DPRINTF(WM_DEBUG_TX,
2681 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
2682 			    device_xname(sc->sc_dev)));
2683 			ifp->if_flags |= IFF_OACTIVE;
2684 			bus_dmamap_unload(sc->sc_dmat, dmamap);
2685 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
2686 			break;
2687 		}
2688 
2689 		IFQ_DEQUEUE(&ifp->if_snd, m0);
2690 
2691 		/*
2692 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
2693 		 */
2694 
2695 		DPRINTF(WM_DEBUG_TX,
2696 		    ("%s: TX: packet has %d (%d) DMA segments\n",
2697 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
2698 
2699 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
2700 
2701 		/*
2702 		 * Store a pointer to the packet so that we can free it
2703 		 * later.
2704 		 *
2705 		 * Initially, we consider the number of descriptors the
2706 		 * packet uses the number of DMA segments.  This may be
2707 		 * incremented by 1 if we do checksum offload (a descriptor
2708 		 * is used to set the checksum context).
2709 		 */
2710 		txs->txs_mbuf = m0;
2711 		txs->txs_firstdesc = sc->sc_txnext;
2712 		txs->txs_ndesc = segs_needed;
2713 
2714 		/* Set up offload parameters for this packet. */
2715 		if (m0->m_pkthdr.csum_flags &
2716 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
2717 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
2718 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
2719 			if (wm_tx_offload(sc, txs, &cksumcmd,
2720 					  &cksumfields) != 0) {
2721 				/* Error message already displayed. */
2722 				bus_dmamap_unload(sc->sc_dmat, dmamap);
2723 				continue;
2724 			}
2725 		} else {
2726 			cksumcmd = 0;
2727 			cksumfields = 0;
2728 		}
2729 
2730 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
2731 
2732 		/* Sync the DMA map. */
2733 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
2734 		    BUS_DMASYNC_PREWRITE);
2735 
2736 		/*
2737 		 * Initialize the transmit descriptor.
2738 		 */
2739 		for (nexttx = sc->sc_txnext, seg = 0;
2740 		     seg < dmamap->dm_nsegs; seg++) {
2741 			for (seglen = dmamap->dm_segs[seg].ds_len,
2742 			     curaddr = dmamap->dm_segs[seg].ds_addr;
2743 			     seglen != 0;
2744 			     curaddr += curlen, seglen -= curlen,
2745 			     nexttx = WM_NEXTTX(sc, nexttx)) {
2746 				curlen = seglen;
2747 
2748 				/*
2749 				 * So says the Linux driver:
2750 				 * Work around for premature descriptor
2751 				 * write-backs in TSO mode.  Append a
2752 				 * 4-byte sentinel descriptor.
2753 				 */
2754 				if (use_tso &&
2755 				    seg == dmamap->dm_nsegs - 1 &&
2756 				    curlen > 8)
2757 					curlen -= 4;
2758 
2759 				wm_set_dma_addr(
2760 				    &sc->sc_txdescs[nexttx].wtx_addr,
2761 				    curaddr);
2762 				sc->sc_txdescs[nexttx].wtx_cmdlen =
2763 				    htole32(cksumcmd | curlen);
2764 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_status =
2765 				    0;
2766 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_options =
2767 				    cksumfields;
2768 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
2769 				lasttx = nexttx;
2770 
2771 				DPRINTF(WM_DEBUG_TX,
2772 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
2773 				     "len %#04zx\n",
2774 				    device_xname(sc->sc_dev), nexttx,
2775 				    (uint64_t)curaddr, curlen));
2776 			}
2777 		}
2778 
2779 		KASSERT(lasttx != -1);
2780 
2781 		/*
2782 		 * Set up the command byte on the last descriptor of
2783 		 * the packet.  If we're in the interrupt delay window,
2784 		 * delay the interrupt.
2785 		 */
2786 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
2787 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
2788 
2789 		/*
2790 		 * If VLANs are enabled and the packet has a VLAN tag, set
2791 		 * up the descriptor to encapsulate the packet for us.
2792 		 *
2793 		 * This is only valid on the last descriptor of the packet.
2794 		 */
2795 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2796 			sc->sc_txdescs[lasttx].wtx_cmdlen |=
2797 			    htole32(WTX_CMD_VLE);
2798 			sc->sc_txdescs[lasttx].wtx_fields.wtxu_vlan
2799 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
2800 		}
2801 
2802 		txs->txs_lastdesc = lasttx;
2803 
2804 		DPRINTF(WM_DEBUG_TX,
2805 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
2806 		    device_xname(sc->sc_dev),
2807 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
2808 
2809 		/* Sync the descriptors we're using. */
2810 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
2811 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
2812 
2813 		/* Give the packet to the chip. */
2814 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
2815 
2816 		DPRINTF(WM_DEBUG_TX,
2817 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
2818 
2819 		DPRINTF(WM_DEBUG_TX,
2820 		    ("%s: TX: finished transmitting packet, job %d\n",
2821 		    device_xname(sc->sc_dev), sc->sc_txsnext));
2822 
2823 		/* Advance the tx pointer. */
2824 		sc->sc_txfree -= txs->txs_ndesc;
2825 		sc->sc_txnext = nexttx;
2826 
2827 		sc->sc_txsfree--;
2828 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
2829 
2830 		/* Pass the packet to any BPF listeners. */
2831 		bpf_mtap(ifp, m0);
2832 	}
2833 
2834 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
2835 		/* No more slots; notify upper layer. */
2836 		ifp->if_flags |= IFF_OACTIVE;
2837 	}
2838 
2839 	if (sc->sc_txfree != ofree) {
2840 		/* Set a watchdog timer in case the chip flakes out. */
2841 		ifp->if_timer = 5;
2842 	}
2843 }
2844 
2845 /*
2846  * wm_nq_tx_offload:
2847  *
2848  *	Set up TCP/IP checksumming parameters for the
2849  *	specified packet, for NEWQUEUE devices
2850  */
2851 static int
2852 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
2853     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
2854 {
2855 	struct mbuf *m0 = txs->txs_mbuf;
2856 	struct m_tag *mtag;
2857 	uint32_t vl_len, mssidx, cmdc;
2858 	struct ether_header *eh;
2859 	int offset, iphl;
2860 
2861 	/*
2862 	 * XXX It would be nice if the mbuf pkthdr had offset
2863 	 * fields for the protocol headers.
2864 	 */
2865 	*cmdlenp = 0;
2866 	*fieldsp = 0;
2867 
2868 	eh = mtod(m0, struct ether_header *);
2869 	switch (htons(eh->ether_type)) {
2870 	case ETHERTYPE_IP:
2871 	case ETHERTYPE_IPV6:
2872 		offset = ETHER_HDR_LEN;
2873 		break;
2874 
2875 	case ETHERTYPE_VLAN:
2876 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2877 		break;
2878 
2879 	default:
2880 		/*
2881 		 * Don't support this protocol or encapsulation.
2882 		 */
2883 		*do_csum = false;
2884 		return 0;
2885 	}
2886 	*do_csum = true;
2887 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
2888 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
2889 
2890 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
2891 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
2892 
2893 	if ((m0->m_pkthdr.csum_flags &
2894 	    (M_CSUM_TSOv4|M_CSUM_UDPv4|M_CSUM_TCPv4|M_CSUM_IPv4)) != 0) {
2895 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
2896 	} else {
2897 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
2898 	}
2899 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
2900 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
2901 
2902 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
2903 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
2904 		     << NQTXC_VLLEN_VLAN_SHIFT);
2905 		*cmdlenp |= NQTX_CMD_VLE;
2906 	}
2907 
2908 	mssidx = 0;
2909 
2910 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
2911 		int hlen = offset + iphl;
2912 		int tcp_hlen;
2913 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
2914 
2915 		if (__predict_false(m0->m_len <
2916 				    (hlen + sizeof(struct tcphdr)))) {
2917 			/*
2918 			 * TCP/IP headers are not in the first mbuf; we need
2919 			 * to do this the slow and painful way.  Let's just
2920 			 * hope this doesn't happen very often.
2921 			 */
2922 			struct tcphdr th;
2923 
2924 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
2925 
2926 			m_copydata(m0, hlen, sizeof(th), &th);
2927 			if (v4) {
2928 				struct ip ip;
2929 
2930 				m_copydata(m0, offset, sizeof(ip), &ip);
2931 				ip.ip_len = 0;
2932 				m_copyback(m0,
2933 				    offset + offsetof(struct ip, ip_len),
2934 				    sizeof(ip.ip_len), &ip.ip_len);
2935 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
2936 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
2937 			} else {
2938 				struct ip6_hdr ip6;
2939 
2940 				m_copydata(m0, offset, sizeof(ip6), &ip6);
2941 				ip6.ip6_plen = 0;
2942 				m_copyback(m0,
2943 				    offset + offsetof(struct ip6_hdr, ip6_plen),
2944 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
2945 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
2946 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
2947 			}
2948 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
2949 			    sizeof(th.th_sum), &th.th_sum);
2950 
2951 			tcp_hlen = th.th_off << 2;
2952 		} else {
2953 			/*
2954 			 * TCP/IP headers are in the first mbuf; we can do
2955 			 * this the easy way.
2956 			 */
2957 			struct tcphdr *th;
2958 
2959 			if (v4) {
2960 				struct ip *ip =
2961 				    (void *)(mtod(m0, char *) + offset);
2962 				th = (void *)(mtod(m0, char *) + hlen);
2963 
2964 				ip->ip_len = 0;
2965 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
2966 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
2967 			} else {
2968 				struct ip6_hdr *ip6 =
2969 				    (void *)(mtod(m0, char *) + offset);
2970 				th = (void *)(mtod(m0, char *) + hlen);
2971 
2972 				ip6->ip6_plen = 0;
2973 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
2974 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
2975 			}
2976 			tcp_hlen = th->th_off << 2;
2977 		}
2978 		hlen += tcp_hlen;
2979 		*cmdlenp |= NQTX_CMD_TSE;
2980 
2981 		if (v4) {
2982 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
2983 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
2984 		} else {
2985 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
2986 			*fieldsp |= NQTXD_FIELDS_TUXSM;
2987 		}
2988 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
2989 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2990 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
2991 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
2992 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
2993 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
2994 	} else {
2995 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
2996 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
2997 	}
2998 
2999 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
3000 		*fieldsp |= NQTXD_FIELDS_IXSM;
3001 		cmdc |= NQTXC_CMD_IP4;
3002 	}
3003 
3004 	if (m0->m_pkthdr.csum_flags &
3005 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3006 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
3007 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
3008 			cmdc |= NQTXC_CMD_TCP;
3009 		} else {
3010 			cmdc |= NQTXC_CMD_UDP;
3011 		}
3012 		cmdc |= NQTXC_CMD_IP4;
3013 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3014 	}
3015 	if (m0->m_pkthdr.csum_flags &
3016 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3017 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
3018 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
3019 			cmdc |= NQTXC_CMD_TCP;
3020 		} else {
3021 			cmdc |= NQTXC_CMD_UDP;
3022 		}
3023 		cmdc |= NQTXC_CMD_IP6;
3024 		*fieldsp |= NQTXD_FIELDS_TUXSM;
3025 	}
3026 
3027 	/* Fill in the context descriptor. */
3028 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_vl_len =
3029 	    htole32(vl_len);
3030 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_sn = 0;
3031 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_cmd =
3032 	    htole32(cmdc);
3033 	sc->sc_nq_txdescs[sc->sc_txnext].nqrx_ctx.nqtxc_mssidx =
3034 	    htole32(mssidx);
3035 	WM_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);
3036 	DPRINTF(WM_DEBUG_TX,
3037 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
3038 	    sc->sc_txnext, 0, vl_len));
3039 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
3040 	sc->sc_txnext = WM_NEXTTX(sc, sc->sc_txnext);
3041 	txs->txs_ndesc++;
3042 	return 0;
3043 }
3044 
3045 /*
3046  * wm_nq_start:		[ifnet interface function]
3047  *
3048  *	Start packet transmission on the interface for NEWQUEUE devices
3049  */
3050 static void
3051 wm_nq_start(struct ifnet *ifp)
3052 {
3053 	struct wm_softc *sc = ifp->if_softc;
3054 	struct mbuf *m0;
3055 	struct m_tag *mtag;
3056 	struct wm_txsoft *txs;
3057 	bus_dmamap_t dmamap;
3058 	int error, nexttx, lasttx = -1, seg, segs_needed;
3059 	bool do_csum, sent;
3060 
3061 	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
3062 		return;
3063 
3064 	sent = false;
3065 
3066 	/*
3067 	 * Loop through the send queue, setting up transmit descriptors
3068 	 * until we drain the queue, or use up all available transmit
3069 	 * descriptors.
3070 	 */
3071 	for (;;) {
3072 		/* Grab a packet off the queue. */
3073 		IFQ_POLL(&ifp->if_snd, m0);
3074 		if (m0 == NULL)
3075 			break;
3076 
3077 		DPRINTF(WM_DEBUG_TX,
3078 		    ("%s: TX: have packet to transmit: %p\n",
3079 		    device_xname(sc->sc_dev), m0));
3080 
3081 		/* Get a work queue entry. */
3082 		if (sc->sc_txsfree < WM_TXQUEUE_GC(sc)) {
3083 			wm_txintr(sc);
3084 			if (sc->sc_txsfree == 0) {
3085 				DPRINTF(WM_DEBUG_TX,
3086 				    ("%s: TX: no free job descriptors\n",
3087 					device_xname(sc->sc_dev)));
3088 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
3089 				break;
3090 			}
3091 		}
3092 
3093 		txs = &sc->sc_txsoft[sc->sc_txsnext];
3094 		dmamap = txs->txs_dmamap;
3095 
3096 		/*
3097 		 * Load the DMA map.  If this fails, the packet either
3098 		 * didn't fit in the allotted number of segments, or we
3099 		 * were short on resources.  For the too-many-segments
3100 		 * case, we simply report an error and drop the packet,
3101 		 * since we can't sanely copy a jumbo packet to a single
3102 		 * buffer.
3103 		 */
3104 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
3105 		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
3106 		if (error) {
3107 			if (error == EFBIG) {
3108 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
3109 				log(LOG_ERR, "%s: Tx packet consumes too many "
3110 				    "DMA segments, dropping...\n",
3111 				    device_xname(sc->sc_dev));
3112 				IFQ_DEQUEUE(&ifp->if_snd, m0);
3113 				wm_dump_mbuf_chain(sc, m0);
3114 				m_freem(m0);
3115 				continue;
3116 			}
3117 			/*
3118 			 * Short on resources, just stop for now.
3119 			 */
3120 			DPRINTF(WM_DEBUG_TX,
3121 			    ("%s: TX: dmamap load failed: %d\n",
3122 			    device_xname(sc->sc_dev), error));
3123 			break;
3124 		}
3125 
3126 		segs_needed = dmamap->dm_nsegs;
3127 
3128 		/*
3129 		 * Ensure we have enough descriptors free to describe
3130 		 * the packet.  Note, we always reserve one descriptor
3131 		 * at the end of the ring due to the semantics of the
3132 		 * TDT register, plus one more in the event we need
3133 		 * to load offload context.
3134 		 */
3135 		if (segs_needed > sc->sc_txfree - 2) {
3136 			/*
3137 			 * Not enough free descriptors to transmit this
3138 			 * packet.  We haven't committed anything yet,
3139 			 * so just unload the DMA map, put the packet
3140 			 * pack on the queue, and punt.  Notify the upper
3141 			 * layer that there are no more slots left.
3142 			 */
3143 			DPRINTF(WM_DEBUG_TX,
3144 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
3145 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
3146 			    segs_needed, sc->sc_txfree - 1));
3147 			ifp->if_flags |= IFF_OACTIVE;
3148 			bus_dmamap_unload(sc->sc_dmat, dmamap);
3149 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
3150 			break;
3151 		}
3152 
3153 		IFQ_DEQUEUE(&ifp->if_snd, m0);
3154 
3155 		/*
3156 		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
3157 		 */
3158 
3159 		DPRINTF(WM_DEBUG_TX,
3160 		    ("%s: TX: packet has %d (%d) DMA segments\n",
3161 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
3162 
3163 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
3164 
3165 		/*
3166 		 * Store a pointer to the packet so that we can free it
3167 		 * later.
3168 		 *
3169 		 * Initially, we consider the number of descriptors the
3170 		 * packet uses the number of DMA segments.  This may be
3171 		 * incremented by 1 if we do checksum offload (a descriptor
3172 		 * is used to set the checksum context).
3173 		 */
3174 		txs->txs_mbuf = m0;
3175 		txs->txs_firstdesc = sc->sc_txnext;
3176 		txs->txs_ndesc = segs_needed;
3177 
3178 		/* Set up offload parameters for this packet. */
3179 		uint32_t cmdlen, fields, dcmdlen;
3180 		if (m0->m_pkthdr.csum_flags &
3181 		    (M_CSUM_TSOv4|M_CSUM_TSOv6|
3182 		    M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4|
3183 		    M_CSUM_TCPv6|M_CSUM_UDPv6)) {
3184 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
3185 			    &do_csum) != 0) {
3186 				/* Error message already displayed. */
3187 				bus_dmamap_unload(sc->sc_dmat, dmamap);
3188 				continue;
3189 			}
3190 		} else {
3191 			do_csum = false;
3192 			cmdlen = 0;
3193 			fields = 0;
3194 		}
3195 
3196 		/* Sync the DMA map. */
3197 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
3198 		    BUS_DMASYNC_PREWRITE);
3199 
3200 		/*
3201 		 * Initialize the first transmit descriptor.
3202 		 */
3203 		nexttx = sc->sc_txnext;
3204 		if (!do_csum) {
3205 			/* setup a legacy descriptor */
3206 			wm_set_dma_addr(
3207 			    &sc->sc_txdescs[nexttx].wtx_addr,
3208 			    dmamap->dm_segs[0].ds_addr);
3209 			sc->sc_txdescs[nexttx].wtx_cmdlen =
3210 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
3211 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_status = 0;
3212 			sc->sc_txdescs[nexttx].wtx_fields.wtxu_options = 0;
3213 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
3214 			    NULL) {
3215 				sc->sc_txdescs[nexttx].wtx_cmdlen |=
3216 				    htole32(WTX_CMD_VLE);
3217 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan =
3218 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
3219 			} else {
3220 				sc->sc_txdescs[nexttx].wtx_fields.wtxu_vlan = 0;
3221 			}
3222 			dcmdlen = 0;
3223 		} else {
3224 			/* setup an advanced data descriptor */
3225 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3226 			    htole64(dmamap->dm_segs[0].ds_addr);
3227 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
3228 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3229 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
3230 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields =
3231 			    htole32(fields);
3232 			DPRINTF(WM_DEBUG_TX,
3233 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
3234 			    device_xname(sc->sc_dev), nexttx,
3235 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
3236 			DPRINTF(WM_DEBUG_TX,
3237 			    ("\t 0x%08x%08x\n", fields,
3238 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
3239 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
3240 		}
3241 
3242 		lasttx = nexttx;
3243 		nexttx = WM_NEXTTX(sc, nexttx);
3244 		/*
3245 		 * fill in the next descriptors. legacy or adcanced format
3246 		 * is the same here
3247 		 */
3248 		for (seg = 1; seg < dmamap->dm_nsegs;
3249 		    seg++, nexttx = WM_NEXTTX(sc, nexttx)) {
3250 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_addr =
3251 			    htole64(dmamap->dm_segs[seg].ds_addr);
3252 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_cmdlen =
3253 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
3254 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
3255 			sc->sc_nq_txdescs[nexttx].nqtx_data.nqtxd_fields = 0;
3256 			lasttx = nexttx;
3257 
3258 			DPRINTF(WM_DEBUG_TX,
3259 			    ("%s: TX: desc %d: %#" PRIx64 ", "
3260 			     "len %#04zx\n",
3261 			    device_xname(sc->sc_dev), nexttx,
3262 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
3263 			    dmamap->dm_segs[seg].ds_len));
3264 		}
3265 
3266 		KASSERT(lasttx != -1);
3267 
3268 		/*
3269 		 * Set up the command byte on the last descriptor of
3270 		 * the packet.  If we're in the interrupt delay window,
3271 		 * delay the interrupt.
3272 		 */
3273 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
3274 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
3275 		sc->sc_txdescs[lasttx].wtx_cmdlen |=
3276 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
3277 
3278 		txs->txs_lastdesc = lasttx;
3279 
3280 		DPRINTF(WM_DEBUG_TX,
3281 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
3282 		    device_xname(sc->sc_dev),
3283 		    lasttx, le32toh(sc->sc_txdescs[lasttx].wtx_cmdlen)));
3284 
3285 		/* Sync the descriptors we're using. */
3286 		WM_CDTXSYNC(sc, sc->sc_txnext, txs->txs_ndesc,
3287 		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3288 
3289 		/* Give the packet to the chip. */
3290 		CSR_WRITE(sc, sc->sc_tdt_reg, nexttx);
3291 		sent = true;
3292 
3293 		DPRINTF(WM_DEBUG_TX,
3294 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
3295 
3296 		DPRINTF(WM_DEBUG_TX,
3297 		    ("%s: TX: finished transmitting packet, job %d\n",
3298 		    device_xname(sc->sc_dev), sc->sc_txsnext));
3299 
3300 		/* Advance the tx pointer. */
3301 		sc->sc_txfree -= txs->txs_ndesc;
3302 		sc->sc_txnext = nexttx;
3303 
3304 		sc->sc_txsfree--;
3305 		sc->sc_txsnext = WM_NEXTTXS(sc, sc->sc_txsnext);
3306 
3307 		/* Pass the packet to any BPF listeners. */
3308 		bpf_mtap(ifp, m0);
3309 	}
3310 
3311 	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
3312 		/* No more slots; notify upper layer. */
3313 		ifp->if_flags |= IFF_OACTIVE;
3314 	}
3315 
3316 	if (sent) {
3317 		/* Set a watchdog timer in case the chip flakes out. */
3318 		ifp->if_timer = 5;
3319 	}
3320 }
3321 
3322 /*
3323  * wm_watchdog:		[ifnet interface function]
3324  *
3325  *	Watchdog timer handler.
3326  */
3327 static void
3328 wm_watchdog(struct ifnet *ifp)
3329 {
3330 	struct wm_softc *sc = ifp->if_softc;
3331 
3332 	/*
3333 	 * Since we're using delayed interrupts, sweep up
3334 	 * before we report an error.
3335 	 */
3336 	wm_txintr(sc);
3337 
3338 	if (sc->sc_txfree != WM_NTXDESC(sc)) {
3339 #ifdef WM_DEBUG
3340 		int i, j;
3341 		struct wm_txsoft *txs;
3342 #endif
3343 		log(LOG_ERR,
3344 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3345 		    device_xname(sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
3346 		    sc->sc_txnext);
3347 		ifp->if_oerrors++;
3348 #ifdef WM_DEBUG
3349 		for (i = sc->sc_txsdirty; i != sc->sc_txsnext ;
3350 		    i = WM_NEXTTXS(sc, i)) {
3351 		    txs = &sc->sc_txsoft[i];
3352 		    printf("txs %d tx %d -> %d\n",
3353 			i, txs->txs_firstdesc, txs->txs_lastdesc);
3354 		    for (j = txs->txs_firstdesc; ;
3355 			j = WM_NEXTTX(sc, j)) {
3356 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3357 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_addr);
3358 			printf("\t %#08x%08x\n",
3359 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_fields,
3360 			    sc->sc_nq_txdescs[j].nqtx_data.nqtxd_cmdlen);
3361 			if (j == txs->txs_lastdesc)
3362 				break;
3363 			}
3364 		}
3365 #endif
3366 		/* Reset the interface. */
3367 		(void) wm_init(ifp);
3368 	}
3369 
3370 	/* Try to get more packets going. */
3371 	ifp->if_start(ifp);
3372 }
3373 
3374 static int
3375 wm_ifflags_cb(struct ethercom *ec)
3376 {
3377 	struct ifnet *ifp = &ec->ec_if;
3378 	struct wm_softc *sc = ifp->if_softc;
3379 	int change = ifp->if_flags ^ sc->sc_if_flags;
3380 
3381 	if (change != 0)
3382 		sc->sc_if_flags = ifp->if_flags;
3383 
3384 	if ((change & ~(IFF_CANTCHANGE|IFF_DEBUG)) != 0)
3385 		return ENETRESET;
3386 
3387 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3388 		wm_set_filter(sc);
3389 
3390 	wm_set_vlan(sc);
3391 
3392 	return 0;
3393 }
3394 
3395 /*
3396  * wm_ioctl:		[ifnet interface function]
3397  *
3398  *	Handle control requests from the operator.
3399  */
3400 static int
3401 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3402 {
3403 	struct wm_softc *sc = ifp->if_softc;
3404 	struct ifreq *ifr = (struct ifreq *) data;
3405 	struct ifaddr *ifa = (struct ifaddr *)data;
3406 	struct sockaddr_dl *sdl;
3407 	int s, error;
3408 
3409 	s = splnet();
3410 
3411 	switch (cmd) {
3412 	case SIOCSIFMEDIA:
3413 	case SIOCGIFMEDIA:
3414 		/* Flow control requires full-duplex mode. */
3415 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3416 		    (ifr->ifr_media & IFM_FDX) == 0)
3417 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3418 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3419 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3420 				/* We can do both TXPAUSE and RXPAUSE. */
3421 				ifr->ifr_media |=
3422 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3423 			}
3424 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3425 		}
3426 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3427 		break;
3428 	case SIOCINITIFADDR:
3429 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3430 			sdl = satosdl(ifp->if_dl->ifa_addr);
3431 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3432 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3433 			/* unicast address is first multicast entry */
3434 			wm_set_filter(sc);
3435 			error = 0;
3436 			break;
3437 		}
3438 		/*FALLTHROUGH*/
3439 	default:
3440 		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
3441 			break;
3442 
3443 		error = 0;
3444 
3445 		if (cmd == SIOCSIFCAP)
3446 			error = (*ifp->if_init)(ifp);
3447 		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3448 			;
3449 		else if (ifp->if_flags & IFF_RUNNING) {
3450 			/*
3451 			 * Multicast list has changed; set the hardware filter
3452 			 * accordingly.
3453 			 */
3454 			wm_set_filter(sc);
3455 		}
3456 		break;
3457 	}
3458 
3459 	/* Try to get more packets going. */
3460 	ifp->if_start(ifp);
3461 
3462 	splx(s);
3463 	return error;
3464 }
3465 
3466 /*
3467  * wm_intr:
3468  *
3469  *	Interrupt service routine.
3470  */
3471 static int
3472 wm_intr(void *arg)
3473 {
3474 	struct wm_softc *sc = arg;
3475 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3476 	uint32_t icr;
3477 	int handled = 0;
3478 
3479 	while (1 /* CONSTCOND */) {
3480 		icr = CSR_READ(sc, WMREG_ICR);
3481 		if ((icr & sc->sc_icr) == 0)
3482 			break;
3483 		rnd_add_uint32(&sc->rnd_source, icr);
3484 
3485 		handled = 1;
3486 
3487 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3488 		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
3489 			DPRINTF(WM_DEBUG_RX,
3490 			    ("%s: RX: got Rx intr 0x%08x\n",
3491 			    device_xname(sc->sc_dev),
3492 			    icr & (ICR_RXDMT0|ICR_RXT0)));
3493 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
3494 		}
3495 #endif
3496 		wm_rxintr(sc);
3497 
3498 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
3499 		if (icr & ICR_TXDW) {
3500 			DPRINTF(WM_DEBUG_TX,
3501 			    ("%s: TX: got TXDW interrupt\n",
3502 			    device_xname(sc->sc_dev)));
3503 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
3504 		}
3505 #endif
3506 		wm_txintr(sc);
3507 
3508 		if (icr & (ICR_LSC|ICR_RXSEQ|ICR_RXCFG)) {
3509 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
3510 			wm_linkintr(sc, icr);
3511 		}
3512 
3513 		if (icr & ICR_RXO) {
3514 #if defined(WM_DEBUG)
3515 			log(LOG_WARNING, "%s: Receive overrun\n",
3516 			    device_xname(sc->sc_dev));
3517 #endif /* defined(WM_DEBUG) */
3518 		}
3519 	}
3520 
3521 	if (handled) {
3522 		/* Try to get more packets going. */
3523 		ifp->if_start(ifp);
3524 	}
3525 
3526 	return handled;
3527 }
3528 
3529 /*
3530  * wm_txintr:
3531  *
3532  *	Helper; handle transmit interrupts.
3533  */
3534 static void
3535 wm_txintr(struct wm_softc *sc)
3536 {
3537 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3538 	struct wm_txsoft *txs;
3539 	uint8_t status;
3540 	int i;
3541 
3542 	ifp->if_flags &= ~IFF_OACTIVE;
3543 
3544 	/*
3545 	 * Go through the Tx list and free mbufs for those
3546 	 * frames which have been transmitted.
3547 	 */
3548 	for (i = sc->sc_txsdirty; sc->sc_txsfree != WM_TXQUEUELEN(sc);
3549 	     i = WM_NEXTTXS(sc, i), sc->sc_txsfree++) {
3550 		txs = &sc->sc_txsoft[i];
3551 
3552 		DPRINTF(WM_DEBUG_TX,
3553 		    ("%s: TX: checking job %d\n", device_xname(sc->sc_dev), i));
3554 
3555 		WM_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_ndesc,
3556 		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3557 
3558 		status =
3559 		    sc->sc_txdescs[txs->txs_lastdesc].wtx_fields.wtxu_status;
3560 		if ((status & WTX_ST_DD) == 0) {
3561 			WM_CDTXSYNC(sc, txs->txs_lastdesc, 1,
3562 			    BUS_DMASYNC_PREREAD);
3563 			break;
3564 		}
3565 
3566 		DPRINTF(WM_DEBUG_TX,
3567 		    ("%s: TX: job %d done: descs %d..%d\n",
3568 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
3569 		    txs->txs_lastdesc));
3570 
3571 		/*
3572 		 * XXX We should probably be using the statistics
3573 		 * XXX registers, but I don't know if they exist
3574 		 * XXX on chips before the i82544.
3575 		 */
3576 
3577 #ifdef WM_EVENT_COUNTERS
3578 		if (status & WTX_ST_TU)
3579 			WM_EVCNT_INCR(&sc->sc_ev_tu);
3580 #endif /* WM_EVENT_COUNTERS */
3581 
3582 		if (status & (WTX_ST_EC|WTX_ST_LC)) {
3583 			ifp->if_oerrors++;
3584 			if (status & WTX_ST_LC)
3585 				log(LOG_WARNING, "%s: late collision\n",
3586 				    device_xname(sc->sc_dev));
3587 			else if (status & WTX_ST_EC) {
3588 				ifp->if_collisions += 16;
3589 				log(LOG_WARNING, "%s: excessive collisions\n",
3590 				    device_xname(sc->sc_dev));
3591 			}
3592 		} else
3593 			ifp->if_opackets++;
3594 
3595 		sc->sc_txfree += txs->txs_ndesc;
3596 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
3597 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3598 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
3599 		m_freem(txs->txs_mbuf);
3600 		txs->txs_mbuf = NULL;
3601 	}
3602 
3603 	/* Update the dirty transmit buffer pointer. */
3604 	sc->sc_txsdirty = i;
3605 	DPRINTF(WM_DEBUG_TX,
3606 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
3607 
3608 	/*
3609 	 * If there are no more pending transmissions, cancel the watchdog
3610 	 * timer.
3611 	 */
3612 	if (sc->sc_txsfree == WM_TXQUEUELEN(sc))
3613 		ifp->if_timer = 0;
3614 }
3615 
3616 /*
3617  * wm_rxintr:
3618  *
3619  *	Helper; handle receive interrupts.
3620  */
3621 static void
3622 wm_rxintr(struct wm_softc *sc)
3623 {
3624 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3625 	struct wm_rxsoft *rxs;
3626 	struct mbuf *m;
3627 	int i, len;
3628 	uint8_t status, errors;
3629 	uint16_t vlantag;
3630 
3631 	for (i = sc->sc_rxptr;; i = WM_NEXTRX(i)) {
3632 		rxs = &sc->sc_rxsoft[i];
3633 
3634 		DPRINTF(WM_DEBUG_RX,
3635 		    ("%s: RX: checking descriptor %d\n",
3636 		    device_xname(sc->sc_dev), i));
3637 
3638 		WM_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3639 
3640 		status = sc->sc_rxdescs[i].wrx_status;
3641 		errors = sc->sc_rxdescs[i].wrx_errors;
3642 		len = le16toh(sc->sc_rxdescs[i].wrx_len);
3643 		vlantag = sc->sc_rxdescs[i].wrx_special;
3644 
3645 		if ((status & WRX_ST_DD) == 0) {
3646 			/*
3647 			 * We have processed all of the receive descriptors.
3648 			 */
3649 			WM_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
3650 			break;
3651 		}
3652 
3653 		if (__predict_false(sc->sc_rxdiscard)) {
3654 			DPRINTF(WM_DEBUG_RX,
3655 			    ("%s: RX: discarding contents of descriptor %d\n",
3656 			    device_xname(sc->sc_dev), i));
3657 			WM_INIT_RXDESC(sc, i);
3658 			if (status & WRX_ST_EOP) {
3659 				/* Reset our state. */
3660 				DPRINTF(WM_DEBUG_RX,
3661 				    ("%s: RX: resetting rxdiscard -> 0\n",
3662 				    device_xname(sc->sc_dev)));
3663 				sc->sc_rxdiscard = 0;
3664 			}
3665 			continue;
3666 		}
3667 
3668 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3669 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
3670 
3671 		m = rxs->rxs_mbuf;
3672 
3673 		/*
3674 		 * Add a new receive buffer to the ring, unless of
3675 		 * course the length is zero. Treat the latter as a
3676 		 * failed mapping.
3677 		 */
3678 		if ((len == 0) || (wm_add_rxbuf(sc, i) != 0)) {
3679 			/*
3680 			 * Failed, throw away what we've done so
3681 			 * far, and discard the rest of the packet.
3682 			 */
3683 			ifp->if_ierrors++;
3684 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3685 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3686 			WM_INIT_RXDESC(sc, i);
3687 			if ((status & WRX_ST_EOP) == 0)
3688 				sc->sc_rxdiscard = 1;
3689 			if (sc->sc_rxhead != NULL)
3690 				m_freem(sc->sc_rxhead);
3691 			WM_RXCHAIN_RESET(sc);
3692 			DPRINTF(WM_DEBUG_RX,
3693 			    ("%s: RX: Rx buffer allocation failed, "
3694 			    "dropping packet%s\n", device_xname(sc->sc_dev),
3695 			    sc->sc_rxdiscard ? " (discard)" : ""));
3696 			continue;
3697 		}
3698 
3699 		m->m_len = len;
3700 		sc->sc_rxlen += len;
3701 		DPRINTF(WM_DEBUG_RX,
3702 		    ("%s: RX: buffer at %p len %d\n",
3703 		    device_xname(sc->sc_dev), m->m_data, len));
3704 
3705 		/*
3706 		 * If this is not the end of the packet, keep
3707 		 * looking.
3708 		 */
3709 		if ((status & WRX_ST_EOP) == 0) {
3710 			WM_RXCHAIN_LINK(sc, m);
3711 			DPRINTF(WM_DEBUG_RX,
3712 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
3713 			    device_xname(sc->sc_dev), sc->sc_rxlen));
3714 			continue;
3715 		}
3716 
3717 		/*
3718 		 * Okay, we have the entire packet now.  The chip is
3719 		 * configured to include the FCS except I350 and I21[01]
3720 		 * (not all chips can be configured to strip it),
3721 		 * so we need to trim it.
3722 		 * May need to adjust length of previous mbuf in the
3723 		 * chain if the current mbuf is too short.
3724 		 * For an eratta, the RCTL_SECRC bit in RCTL register
3725 		 * is always set in I350, so we don't trim it.
3726 		 */
3727 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
3728 		    && (sc->sc_type != WM_T_I210)
3729 		    && (sc->sc_type != WM_T_I211)) {
3730 			if (m->m_len < ETHER_CRC_LEN) {
3731 				sc->sc_rxtail->m_len
3732 				    -= (ETHER_CRC_LEN - m->m_len);
3733 				m->m_len = 0;
3734 			} else
3735 				m->m_len -= ETHER_CRC_LEN;
3736 			len = sc->sc_rxlen - ETHER_CRC_LEN;
3737 		} else
3738 			len = sc->sc_rxlen;
3739 
3740 		WM_RXCHAIN_LINK(sc, m);
3741 
3742 		*sc->sc_rxtailp = NULL;
3743 		m = sc->sc_rxhead;
3744 
3745 		WM_RXCHAIN_RESET(sc);
3746 
3747 		DPRINTF(WM_DEBUG_RX,
3748 		    ("%s: RX: have entire packet, len -> %d\n",
3749 		    device_xname(sc->sc_dev), len));
3750 
3751 		/*
3752 		 * If an error occurred, update stats and drop the packet.
3753 		 */
3754 		if (errors &
3755 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
3756 			if (errors & WRX_ER_SE)
3757 				log(LOG_WARNING, "%s: symbol error\n",
3758 				    device_xname(sc->sc_dev));
3759 			else if (errors & WRX_ER_SEQ)
3760 				log(LOG_WARNING, "%s: receive sequence error\n",
3761 				    device_xname(sc->sc_dev));
3762 			else if (errors & WRX_ER_CE)
3763 				log(LOG_WARNING, "%s: CRC error\n",
3764 				    device_xname(sc->sc_dev));
3765 			m_freem(m);
3766 			continue;
3767 		}
3768 
3769 		/*
3770 		 * No errors.  Receive the packet.
3771 		 */
3772 		m->m_pkthdr.rcvif = ifp;
3773 		m->m_pkthdr.len = len;
3774 
3775 		/*
3776 		 * If VLANs are enabled, VLAN packets have been unwrapped
3777 		 * for us.  Associate the tag with the packet.
3778 		 */
3779 		/* XXXX should check for i350 and i354 */
3780 		if ((status & WRX_ST_VP) != 0) {
3781 			VLAN_INPUT_TAG(ifp, m,
3782 			    le16toh(vlantag),
3783 			    continue);
3784 		}
3785 
3786 		/*
3787 		 * Set up checksum info for this packet.
3788 		 */
3789 		if ((status & WRX_ST_IXSM) == 0) {
3790 			if (status & WRX_ST_IPCS) {
3791 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
3792 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
3793 				if (errors & WRX_ER_IPE)
3794 					m->m_pkthdr.csum_flags |=
3795 					    M_CSUM_IPv4_BAD;
3796 			}
3797 			if (status & WRX_ST_TCPCS) {
3798 				/*
3799 				 * Note: we don't know if this was TCP or UDP,
3800 				 * so we just set both bits, and expect the
3801 				 * upper layers to deal.
3802 				 */
3803 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
3804 				m->m_pkthdr.csum_flags |=
3805 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
3806 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
3807 				if (errors & WRX_ER_TCPE)
3808 					m->m_pkthdr.csum_flags |=
3809 					    M_CSUM_TCP_UDP_BAD;
3810 			}
3811 		}
3812 
3813 		ifp->if_ipackets++;
3814 
3815 		/* Pass this up to any BPF listeners. */
3816 		bpf_mtap(ifp, m);
3817 
3818 		/* Pass it on. */
3819 		(*ifp->if_input)(ifp, m);
3820 	}
3821 
3822 	/* Update the receive pointer. */
3823 	sc->sc_rxptr = i;
3824 
3825 	DPRINTF(WM_DEBUG_RX,
3826 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
3827 }
3828 
3829 /*
3830  * wm_linkintr_gmii:
3831  *
3832  *	Helper; handle link interrupts for GMII.
3833  */
3834 static void
3835 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
3836 {
3837 
3838 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3839 		__func__));
3840 
3841 	if (icr & ICR_LSC) {
3842 		DPRINTF(WM_DEBUG_LINK,
3843 		    ("%s: LINK: LSC -> mii_pollstat\n",
3844 			device_xname(sc->sc_dev)));
3845 		mii_pollstat(&sc->sc_mii);
3846 		if (sc->sc_type == WM_T_82543) {
3847 			int miistatus, active;
3848 
3849 			/*
3850 			 * With 82543, we need to force speed and
3851 			 * duplex on the MAC equal to what the PHY
3852 			 * speed and duplex configuration is.
3853 			 */
3854 			miistatus = sc->sc_mii.mii_media_status;
3855 
3856 			if (miistatus & IFM_ACTIVE) {
3857 				active = sc->sc_mii.mii_media_active;
3858 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
3859 				switch (IFM_SUBTYPE(active)) {
3860 				case IFM_10_T:
3861 					sc->sc_ctrl |= CTRL_SPEED_10;
3862 					break;
3863 				case IFM_100_TX:
3864 					sc->sc_ctrl |= CTRL_SPEED_100;
3865 					break;
3866 				case IFM_1000_T:
3867 					sc->sc_ctrl |= CTRL_SPEED_1000;
3868 					break;
3869 				default:
3870 					/*
3871 					 * fiber?
3872 					 * Shoud not enter here.
3873 					 */
3874 					printf("unknown media (%x)\n",
3875 					    active);
3876 					break;
3877 				}
3878 				if (active & IFM_FDX)
3879 					sc->sc_ctrl |= CTRL_FD;
3880 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3881 			}
3882 		} else if ((sc->sc_type == WM_T_ICH8)
3883 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
3884 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
3885 		} else if (sc->sc_type == WM_T_PCH) {
3886 			wm_k1_gig_workaround_hv(sc,
3887 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
3888 		}
3889 
3890 		if ((sc->sc_phytype == WMPHY_82578)
3891 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
3892 			== IFM_1000_T)) {
3893 
3894 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
3895 				delay(200*1000); /* XXX too big */
3896 
3897 				/* Link stall fix for link up */
3898 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3899 				    HV_MUX_DATA_CTRL,
3900 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
3901 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
3902 				wm_gmii_hv_writereg(sc->sc_dev, 1,
3903 				    HV_MUX_DATA_CTRL,
3904 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
3905 			}
3906 		}
3907 	} else if (icr & ICR_RXSEQ) {
3908 		DPRINTF(WM_DEBUG_LINK,
3909 		    ("%s: LINK Receive sequence error\n",
3910 			device_xname(sc->sc_dev)));
3911 	}
3912 }
3913 
3914 /*
3915  * wm_linkintr_tbi:
3916  *
3917  *	Helper; handle link interrupts for TBI mode.
3918  */
3919 static void
3920 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
3921 {
3922 	uint32_t status;
3923 
3924 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
3925 		__func__));
3926 
3927 	status = CSR_READ(sc, WMREG_STATUS);
3928 	if (icr & ICR_LSC) {
3929 		if (status & STATUS_LU) {
3930 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
3931 			    device_xname(sc->sc_dev),
3932 			    (status & STATUS_FD) ? "FDX" : "HDX"));
3933 			/*
3934 			 * NOTE: CTRL will update TFCE and RFCE automatically,
3935 			 * so we should update sc->sc_ctrl
3936 			 */
3937 
3938 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3939 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
3940 			sc->sc_fcrtl &= ~FCRTL_XONE;
3941 			if (status & STATUS_FD)
3942 				sc->sc_tctl |=
3943 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
3944 			else
3945 				sc->sc_tctl |=
3946 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
3947 			if (sc->sc_ctrl & CTRL_TFCE)
3948 				sc->sc_fcrtl |= FCRTL_XONE;
3949 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
3950 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
3951 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
3952 				      sc->sc_fcrtl);
3953 			sc->sc_tbi_linkup = 1;
3954 		} else {
3955 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
3956 			    device_xname(sc->sc_dev)));
3957 			sc->sc_tbi_linkup = 0;
3958 		}
3959 		wm_tbi_set_linkled(sc);
3960 	} else if (icr & ICR_RXCFG) {
3961 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: receiving /C/\n",
3962 		    device_xname(sc->sc_dev)));
3963 		sc->sc_tbi_nrxcfg++;
3964 		wm_check_for_link(sc);
3965 	} else if (icr & ICR_RXSEQ) {
3966 		DPRINTF(WM_DEBUG_LINK,
3967 		    ("%s: LINK: Receive sequence error\n",
3968 		    device_xname(sc->sc_dev)));
3969 	}
3970 }
3971 
3972 /*
3973  * wm_linkintr:
3974  *
3975  *	Helper; handle link interrupts.
3976  */
3977 static void
3978 wm_linkintr(struct wm_softc *sc, uint32_t icr)
3979 {
3980 
3981 	if (sc->sc_flags & WM_F_HAS_MII)
3982 		wm_linkintr_gmii(sc, icr);
3983 	else
3984 		wm_linkintr_tbi(sc, icr);
3985 }
3986 
3987 /*
3988  * wm_tick:
3989  *
3990  *	One second timer, used to check link status, sweep up
3991  *	completed transmit jobs, etc.
3992  */
3993 static void
3994 wm_tick(void *arg)
3995 {
3996 	struct wm_softc *sc = arg;
3997 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3998 	int s;
3999 
4000 	s = splnet();
4001 
4002 	if (sc->sc_type >= WM_T_82542_2_1) {
4003 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
4004 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
4005 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
4006 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
4007 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
4008 	}
4009 
4010 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4011 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
4012 	    + CSR_READ(sc, WMREG_CRCERRS)
4013 	    + CSR_READ(sc, WMREG_ALGNERRC)
4014 	    + CSR_READ(sc, WMREG_SYMERRC)
4015 	    + CSR_READ(sc, WMREG_RXERRC)
4016 	    + CSR_READ(sc, WMREG_SEC)
4017 	    + CSR_READ(sc, WMREG_CEXTERR)
4018 	    + CSR_READ(sc, WMREG_RLEC);
4019 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
4020 
4021 	if (sc->sc_flags & WM_F_HAS_MII)
4022 		mii_tick(&sc->sc_mii);
4023 	else
4024 		wm_tbi_check_link(sc);
4025 
4026 	splx(s);
4027 
4028 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4029 }
4030 
4031 /*
4032  * wm_reset:
4033  *
4034  *	Reset the i82542 chip.
4035  */
4036 static void
4037 wm_reset(struct wm_softc *sc)
4038 {
4039 	int phy_reset = 0;
4040 	uint32_t reg, mask;
4041 
4042 	/*
4043 	 * Allocate on-chip memory according to the MTU size.
4044 	 * The Packet Buffer Allocation register must be written
4045 	 * before the chip is reset.
4046 	 */
4047 	switch (sc->sc_type) {
4048 	case WM_T_82547:
4049 	case WM_T_82547_2:
4050 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4051 		    PBA_22K : PBA_30K;
4052 		sc->sc_txfifo_head = 0;
4053 		sc->sc_txfifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4054 		sc->sc_txfifo_size =
4055 		    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4056 		sc->sc_txfifo_stall = 0;
4057 		break;
4058 	case WM_T_82571:
4059 	case WM_T_82572:
4060 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
4061 	case WM_T_I350:
4062 	case WM_T_I354:
4063 	case WM_T_80003:
4064 		sc->sc_pba = PBA_32K;
4065 		break;
4066 	case WM_T_82580:
4067 	case WM_T_82580ER:
4068 		sc->sc_pba = PBA_35K;
4069 		break;
4070 	case WM_T_I210:
4071 	case WM_T_I211:
4072 		sc->sc_pba = PBA_34K;
4073 		break;
4074 	case WM_T_82576:
4075 		sc->sc_pba = PBA_64K;
4076 		break;
4077 	case WM_T_82573:
4078 		sc->sc_pba = PBA_12K;
4079 		break;
4080 	case WM_T_82574:
4081 	case WM_T_82583:
4082 		sc->sc_pba = PBA_20K;
4083 		break;
4084 	case WM_T_ICH8:
4085 		sc->sc_pba = PBA_8K;
4086 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4087 		break;
4088 	case WM_T_ICH9:
4089 	case WM_T_ICH10:
4090 		sc->sc_pba = PBA_10K;
4091 		break;
4092 	case WM_T_PCH:
4093 	case WM_T_PCH2:
4094 	case WM_T_PCH_LPT:
4095 		sc->sc_pba = PBA_26K;
4096 		break;
4097 	default:
4098 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4099 		    PBA_40K : PBA_48K;
4100 		break;
4101 	}
4102 	CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4103 
4104 	/* Prevent the PCI-E bus from sticking */
4105 	if (sc->sc_flags & WM_F_PCIE) {
4106 		int timeout = 800;
4107 
4108 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4109 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4110 
4111 		while (timeout--) {
4112 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4113 			    == 0)
4114 				break;
4115 			delay(100);
4116 		}
4117 	}
4118 
4119 	/* Set the completion timeout for interface */
4120 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4121 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4122 		wm_set_pcie_completion_timeout(sc);
4123 
4124 	/* Clear interrupt */
4125 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4126 
4127 	/* Stop the transmit and receive processes. */
4128 	CSR_WRITE(sc, WMREG_RCTL, 0);
4129 	sc->sc_rctl &= ~RCTL_EN;
4130 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4131 	CSR_WRITE_FLUSH(sc);
4132 
4133 	/* XXX set_tbi_sbp_82543() */
4134 
4135 	delay(10*1000);
4136 
4137 	/* Must acquire the MDIO ownership before MAC reset */
4138 	switch (sc->sc_type) {
4139 	case WM_T_82573:
4140 	case WM_T_82574:
4141 	case WM_T_82583:
4142 		wm_get_hw_semaphore_82573(sc);
4143 		break;
4144 	default:
4145 		break;
4146 	}
4147 
4148 	/*
4149 	 * 82541 Errata 29? & 82547 Errata 28?
4150 	 * See also the description about PHY_RST bit in CTRL register
4151 	 * in 8254x_GBe_SDM.pdf.
4152 	 */
4153 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4154 		CSR_WRITE(sc, WMREG_CTRL,
4155 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4156 		CSR_WRITE_FLUSH(sc);
4157 		delay(5000);
4158 	}
4159 
4160 	switch (sc->sc_type) {
4161 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4162 	case WM_T_82541:
4163 	case WM_T_82541_2:
4164 	case WM_T_82547:
4165 	case WM_T_82547_2:
4166 		/*
4167 		 * On some chipsets, a reset through a memory-mapped write
4168 		 * cycle can cause the chip to reset before completing the
4169 		 * write cycle.  This causes major headache that can be
4170 		 * avoided by issuing the reset via indirect register writes
4171 		 * through I/O space.
4172 		 *
4173 		 * So, if we successfully mapped the I/O BAR at attach time,
4174 		 * use that.  Otherwise, try our luck with a memory-mapped
4175 		 * reset.
4176 		 */
4177 		if (sc->sc_flags & WM_F_IOH_VALID)
4178 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4179 		else
4180 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4181 		break;
4182 	case WM_T_82545_3:
4183 	case WM_T_82546_3:
4184 		/* Use the shadow control register on these chips. */
4185 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4186 		break;
4187 	case WM_T_80003:
4188 		mask = swfwphysem[sc->sc_funcid];
4189 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4190 		wm_get_swfw_semaphore(sc, mask);
4191 		CSR_WRITE(sc, WMREG_CTRL, reg);
4192 		wm_put_swfw_semaphore(sc, mask);
4193 		break;
4194 	case WM_T_ICH8:
4195 	case WM_T_ICH9:
4196 	case WM_T_ICH10:
4197 	case WM_T_PCH:
4198 	case WM_T_PCH2:
4199 	case WM_T_PCH_LPT:
4200 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4201 		if (wm_check_reset_block(sc) == 0) {
4202 			/*
4203 			 * Gate automatic PHY configuration by hardware on
4204 			 * non-managed 82579
4205 			 */
4206 			if ((sc->sc_type == WM_T_PCH2)
4207 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4208 				!= 0))
4209 				wm_gate_hw_phy_config_ich8lan(sc, 1);
4210 
4211 
4212 			reg |= CTRL_PHY_RESET;
4213 			phy_reset = 1;
4214 		}
4215 		wm_get_swfwhw_semaphore(sc);
4216 		CSR_WRITE(sc, WMREG_CTRL, reg);
4217 		/* Don't insert a completion barrier when reset */
4218 		delay(20*1000);
4219 		wm_put_swfwhw_semaphore(sc);
4220 		break;
4221 	case WM_T_82542_2_0:
4222 	case WM_T_82542_2_1:
4223 	case WM_T_82543:
4224 	case WM_T_82540:
4225 	case WM_T_82545:
4226 	case WM_T_82546:
4227 	case WM_T_82571:
4228 	case WM_T_82572:
4229 	case WM_T_82573:
4230 	case WM_T_82574:
4231 	case WM_T_82575:
4232 	case WM_T_82576:
4233 	case WM_T_82580:
4234 	case WM_T_82580ER:
4235 	case WM_T_82583:
4236 	case WM_T_I350:
4237 	case WM_T_I354:
4238 	case WM_T_I210:
4239 	case WM_T_I211:
4240 	default:
4241 		/* Everything else can safely use the documented method. */
4242 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4243 		break;
4244 	}
4245 
4246 	/* Must release the MDIO ownership after MAC reset */
4247 	switch (sc->sc_type) {
4248 	case WM_T_82574:
4249 	case WM_T_82583:
4250 		wm_put_hw_semaphore_82573(sc);
4251 		break;
4252 	default:
4253 		break;
4254 	}
4255 
4256 	if (phy_reset != 0)
4257 		wm_get_cfg_done(sc);
4258 
4259 	/* reload EEPROM */
4260 	switch (sc->sc_type) {
4261 	case WM_T_82542_2_0:
4262 	case WM_T_82542_2_1:
4263 	case WM_T_82543:
4264 	case WM_T_82544:
4265 		delay(10);
4266 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4267 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4268 		CSR_WRITE_FLUSH(sc);
4269 		delay(2000);
4270 		break;
4271 	case WM_T_82540:
4272 	case WM_T_82545:
4273 	case WM_T_82545_3:
4274 	case WM_T_82546:
4275 	case WM_T_82546_3:
4276 		delay(5*1000);
4277 		/* XXX Disable HW ARPs on ASF enabled adapters */
4278 		break;
4279 	case WM_T_82541:
4280 	case WM_T_82541_2:
4281 	case WM_T_82547:
4282 	case WM_T_82547_2:
4283 		delay(20000);
4284 		/* XXX Disable HW ARPs on ASF enabled adapters */
4285 		break;
4286 	case WM_T_82571:
4287 	case WM_T_82572:
4288 	case WM_T_82573:
4289 	case WM_T_82574:
4290 	case WM_T_82583:
4291 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4292 			delay(10);
4293 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4294 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4295 			CSR_WRITE_FLUSH(sc);
4296 		}
4297 		/* check EECD_EE_AUTORD */
4298 		wm_get_auto_rd_done(sc);
4299 		/*
4300 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4301 		 * is set.
4302 		 */
4303 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4304 		    || (sc->sc_type == WM_T_82583))
4305 			delay(25*1000);
4306 		break;
4307 	case WM_T_82575:
4308 	case WM_T_82576:
4309 	case WM_T_82580:
4310 	case WM_T_82580ER:
4311 	case WM_T_I350:
4312 	case WM_T_I354:
4313 	case WM_T_I210:
4314 	case WM_T_I211:
4315 	case WM_T_80003:
4316 		/* check EECD_EE_AUTORD */
4317 		wm_get_auto_rd_done(sc);
4318 		break;
4319 	case WM_T_ICH8:
4320 	case WM_T_ICH9:
4321 	case WM_T_ICH10:
4322 	case WM_T_PCH:
4323 	case WM_T_PCH2:
4324 	case WM_T_PCH_LPT:
4325 		break;
4326 	default:
4327 		panic("%s: unknown type\n", __func__);
4328 	}
4329 
4330 	/* Check whether EEPROM is present or not */
4331 	switch (sc->sc_type) {
4332 	case WM_T_82575:
4333 	case WM_T_82576:
4334 #if 0 /* XXX */
4335 	case WM_T_82580:
4336 	case WM_T_82580ER:
4337 #endif
4338 	case WM_T_I350:
4339 	case WM_T_I354:
4340 	case WM_T_ICH8:
4341 	case WM_T_ICH9:
4342 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4343 			/* Not found */
4344 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4345 			if ((sc->sc_type == WM_T_82575)
4346 			    || (sc->sc_type == WM_T_82576)
4347 			    || (sc->sc_type == WM_T_82580)
4348 			    || (sc->sc_type == WM_T_82580ER)
4349 			    || (sc->sc_type == WM_T_I350)
4350 			    || (sc->sc_type == WM_T_I354))
4351 				wm_reset_init_script_82575(sc);
4352 		}
4353 		break;
4354 	default:
4355 		break;
4356 	}
4357 
4358 	if ((sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
4359 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4360 		/* clear global device reset status bit */
4361 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4362 	}
4363 
4364 	/* Clear any pending interrupt events. */
4365 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4366 	reg = CSR_READ(sc, WMREG_ICR);
4367 
4368 	/* reload sc_ctrl */
4369 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4370 
4371 	if (sc->sc_type == WM_T_I350)
4372 		wm_set_eee_i350(sc);
4373 
4374 	/* dummy read from WUC */
4375 	if (sc->sc_type == WM_T_PCH)
4376 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4377 	/*
4378 	 * For PCH, this write will make sure that any noise will be detected
4379 	 * as a CRC error and be dropped rather than show up as a bad packet
4380 	 * to the DMA engine
4381 	 */
4382 	if (sc->sc_type == WM_T_PCH)
4383 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4384 
4385 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4386 		CSR_WRITE(sc, WMREG_WUC, 0);
4387 
4388 	/* XXX need special handling for 82580 */
4389 }
4390 
4391 static void
4392 wm_set_vlan(struct wm_softc *sc)
4393 {
4394 	/* Deal with VLAN enables. */
4395 	if (VLAN_ATTACHED(&sc->sc_ethercom))
4396 		sc->sc_ctrl |= CTRL_VME;
4397 	else
4398 		sc->sc_ctrl &= ~CTRL_VME;
4399 
4400 	/* Write the control registers. */
4401 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4402 }
4403 
4404 /*
4405  * wm_init:		[ifnet interface function]
4406  *
4407  *	Initialize the interface.  Must be called at splnet().
4408  */
4409 static int
4410 wm_init(struct ifnet *ifp)
4411 {
4412 	struct wm_softc *sc = ifp->if_softc;
4413 	struct wm_rxsoft *rxs;
4414 	int i, j, trynum, error = 0;
4415 	uint32_t reg;
4416 
4417 	/*
4418 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
4419 	 * There is a small but measurable benefit to avoiding the adjusment
4420 	 * of the descriptor so that the headers are aligned, for normal mtu,
4421 	 * on such platforms.  One possibility is that the DMA itself is
4422 	 * slightly more efficient if the front of the entire packet (instead
4423 	 * of the front of the headers) is aligned.
4424 	 *
4425 	 * Note we must always set align_tweak to 0 if we are using
4426 	 * jumbo frames.
4427 	 */
4428 #ifdef __NO_STRICT_ALIGNMENT
4429 	sc->sc_align_tweak = 0;
4430 #else
4431 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4432 		sc->sc_align_tweak = 0;
4433 	else
4434 		sc->sc_align_tweak = 2;
4435 #endif /* __NO_STRICT_ALIGNMENT */
4436 
4437 	/* Cancel any pending I/O. */
4438 	wm_stop(ifp, 0);
4439 
4440 	/* update statistics before reset */
4441 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4442 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4443 
4444 	/* Reset the chip to a known state. */
4445 	wm_reset(sc);
4446 
4447 	switch (sc->sc_type) {
4448 	case WM_T_82571:
4449 	case WM_T_82572:
4450 	case WM_T_82573:
4451 	case WM_T_82574:
4452 	case WM_T_82583:
4453 	case WM_T_80003:
4454 	case WM_T_ICH8:
4455 	case WM_T_ICH9:
4456 	case WM_T_ICH10:
4457 	case WM_T_PCH:
4458 	case WM_T_PCH2:
4459 	case WM_T_PCH_LPT:
4460 		if (wm_check_mng_mode(sc) != 0)
4461 			wm_get_hw_control(sc);
4462 		break;
4463 	default:
4464 		break;
4465 	}
4466 
4467 	/* Reset the PHY. */
4468 	if (sc->sc_flags & WM_F_HAS_MII)
4469 		wm_gmii_reset(sc);
4470 
4471 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
4472 	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4473 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
4474 	    || (sc->sc_type == WM_T_PCH_LPT))
4475 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_PHYPDEN);
4476 
4477 	/* Initialize the transmit descriptor ring. */
4478 	memset(sc->sc_txdescs, 0, WM_TXDESCSIZE(sc));
4479 	WM_CDTXSYNC(sc, 0, WM_NTXDESC(sc),
4480 	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4481 	sc->sc_txfree = WM_NTXDESC(sc);
4482 	sc->sc_txnext = 0;
4483 
4484 	if (sc->sc_type < WM_T_82543) {
4485 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(sc, 0));
4486 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(sc, 0));
4487 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(sc));
4488 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
4489 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
4490 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
4491 	} else {
4492 		CSR_WRITE(sc, WMREG_TDBAH, WM_CDTXADDR_HI(sc, 0));
4493 		CSR_WRITE(sc, WMREG_TDBAL, WM_CDTXADDR_LO(sc, 0));
4494 		CSR_WRITE(sc, WMREG_TDLEN, WM_TXDESCSIZE(sc));
4495 		CSR_WRITE(sc, WMREG_TDH, 0);
4496 		CSR_WRITE(sc, WMREG_TIDV, 375);		/* ITR / 4 */
4497 		CSR_WRITE(sc, WMREG_TADV, 375);		/* should be same */
4498 
4499 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4500 			/*
4501 			 * Don't write TDT before TCTL.EN is set.
4502 			 * See the document.
4503 			 */
4504 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_QUEUE_ENABLE
4505 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
4506 			    | TXDCTL_WTHRESH(0));
4507 		else {
4508 			CSR_WRITE(sc, WMREG_TDT, 0);
4509 			CSR_WRITE(sc, WMREG_TXDCTL, TXDCTL_PTHRESH(0) |
4510 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
4511 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_PTHRESH(0) |
4512 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
4513 		}
4514 	}
4515 	CSR_WRITE(sc, WMREG_TQSA_LO, 0);
4516 	CSR_WRITE(sc, WMREG_TQSA_HI, 0);
4517 
4518 	/* Initialize the transmit job descriptors. */
4519 	for (i = 0; i < WM_TXQUEUELEN(sc); i++)
4520 		sc->sc_txsoft[i].txs_mbuf = NULL;
4521 	sc->sc_txsfree = WM_TXQUEUELEN(sc);
4522 	sc->sc_txsnext = 0;
4523 	sc->sc_txsdirty = 0;
4524 
4525 	/*
4526 	 * Initialize the receive descriptor and receive job
4527 	 * descriptor rings.
4528 	 */
4529 	if (sc->sc_type < WM_T_82543) {
4530 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(sc, 0));
4531 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(sc, 0));
4532 		CSR_WRITE(sc, WMREG_OLD_RDLEN0, sizeof(sc->sc_rxdescs));
4533 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
4534 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
4535 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
4536 
4537 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
4538 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
4539 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
4540 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
4541 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
4542 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
4543 	} else {
4544 		CSR_WRITE(sc, WMREG_RDBAH, WM_CDRXADDR_HI(sc, 0));
4545 		CSR_WRITE(sc, WMREG_RDBAL, WM_CDRXADDR_LO(sc, 0));
4546 		CSR_WRITE(sc, WMREG_RDLEN, sizeof(sc->sc_rxdescs));
4547 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4548 			CSR_WRITE(sc, WMREG_EITR(0), 450);
4549 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
4550 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
4551 			CSR_WRITE(sc, WMREG_SRRCTL, SRRCTL_DESCTYPE_LEGACY
4552 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
4553 			CSR_WRITE(sc, WMREG_RXDCTL, RXDCTL_QUEUE_ENABLE
4554 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
4555 			    | RXDCTL_WTHRESH(1));
4556 		} else {
4557 			CSR_WRITE(sc, WMREG_RDH, 0);
4558 			CSR_WRITE(sc, WMREG_RDT, 0);
4559 			CSR_WRITE(sc, WMREG_RDTR, 375 | RDTR_FPD); /* ITR/4 */
4560 			CSR_WRITE(sc, WMREG_RADV, 375);	/* MUST be same */
4561 		}
4562 	}
4563 	for (i = 0; i < WM_NRXDESC; i++) {
4564 		rxs = &sc->sc_rxsoft[i];
4565 		if (rxs->rxs_mbuf == NULL) {
4566 			if ((error = wm_add_rxbuf(sc, i)) != 0) {
4567 				log(LOG_ERR, "%s: unable to allocate or map "
4568 				    "rx buffer %d, error = %d\n",
4569 				    device_xname(sc->sc_dev), i, error);
4570 				/*
4571 				 * XXX Should attempt to run with fewer receive
4572 				 * XXX buffers instead of just failing.
4573 				 */
4574 				wm_rxdrain(sc);
4575 				goto out;
4576 			}
4577 		} else {
4578 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4579 				WM_INIT_RXDESC(sc, i);
4580 			/*
4581 			 * For 82575 and newer device, the RX descriptors
4582 			 * must be initialized after the setting of RCTL.EN in
4583 			 * wm_set_filter()
4584 			 */
4585 		}
4586 	}
4587 	sc->sc_rxptr = 0;
4588 	sc->sc_rxdiscard = 0;
4589 	WM_RXCHAIN_RESET(sc);
4590 
4591 	/*
4592 	 * Clear out the VLAN table -- we don't use it (yet).
4593 	 */
4594 	CSR_WRITE(sc, WMREG_VET, 0);
4595 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4596 		trynum = 10; /* Due to hw errata */
4597 	else
4598 		trynum = 1;
4599 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4600 		for (j = 0; j < trynum; j++)
4601 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4602 
4603 	/*
4604 	 * Set up flow-control parameters.
4605 	 *
4606 	 * XXX Values could probably stand some tuning.
4607 	 */
4608 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4609 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4610 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4611 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4612 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4613 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4614 	}
4615 
4616 	sc->sc_fcrtl = FCRTL_DFLT;
4617 	if (sc->sc_type < WM_T_82543) {
4618 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4619 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4620 	} else {
4621 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4622 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4623 	}
4624 
4625 	if (sc->sc_type == WM_T_80003)
4626 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4627 	else
4628 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4629 
4630 	/* Writes the control register. */
4631 	wm_set_vlan(sc);
4632 
4633 	if (sc->sc_flags & WM_F_HAS_MII) {
4634 		int val;
4635 
4636 		switch (sc->sc_type) {
4637 		case WM_T_80003:
4638 		case WM_T_ICH8:
4639 		case WM_T_ICH9:
4640 		case WM_T_ICH10:
4641 		case WM_T_PCH:
4642 		case WM_T_PCH2:
4643 		case WM_T_PCH_LPT:
4644 			/*
4645 			 * Set the mac to wait the maximum time between each
4646 			 * iteration and increase the max iterations when
4647 			 * polling the phy; this fixes erroneous timeouts at
4648 			 * 10Mbps.
4649 			 */
4650 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4651 			    0xFFFF);
4652 			val = wm_kmrn_readreg(sc,
4653 			    KUMCTRLSTA_OFFSET_INB_PARAM);
4654 			val |= 0x3F;
4655 			wm_kmrn_writereg(sc,
4656 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4657 			break;
4658 		default:
4659 			break;
4660 		}
4661 
4662 		if (sc->sc_type == WM_T_80003) {
4663 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4664 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4665 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4666 
4667 			/* Bypass RX and TX FIFO's */
4668 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4669 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4670 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4671 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4672 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4673 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4674 		}
4675 	}
4676 #if 0
4677 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4678 #endif
4679 
4680 	/*
4681 	 * Set up checksum offload parameters.
4682 	 */
4683 	reg = CSR_READ(sc, WMREG_RXCSUM);
4684 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4685 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4686 		reg |= RXCSUM_IPOFL;
4687 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4688 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4689 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4690 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4691 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4692 
4693 	/* Reset TBI's RXCFG count */
4694 	sc->sc_tbi_nrxcfg = sc->sc_tbi_lastnrxcfg = 0;
4695 
4696 	/*
4697 	 * Set up the interrupt registers.
4698 	 */
4699 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4700 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4701 	    ICR_RXO | ICR_RXT0;
4702 	if ((sc->sc_flags & WM_F_HAS_MII) == 0)
4703 		sc->sc_icr |= ICR_RXCFG;
4704 	CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4705 
4706 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4707 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4708 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4709 		reg = CSR_READ(sc, WMREG_KABGTXD);
4710 		reg |= KABGTXD_BGSQLBIAS;
4711 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4712 	}
4713 
4714 	/* Set up the inter-packet gap. */
4715 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4716 
4717 	if (sc->sc_type >= WM_T_82543) {
4718 		/*
4719 		 * Set up the interrupt throttling register (units of 256ns)
4720 		 * Note that a footnote in Intel's documentation says this
4721 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4722 		 * or 10Mbit mode.  Empirically, it appears to be the case
4723 		 * that that is also true for the 1024ns units of the other
4724 		 * interrupt-related timer registers -- so, really, we ought
4725 		 * to divide this value by 4 when the link speed is low.
4726 		 *
4727 		 * XXX implement this division at link speed change!
4728 		 */
4729 
4730 		 /*
4731 		  * For N interrupts/sec, set this value to:
4732 		  * 1000000000 / (N * 256).  Note that we set the
4733 		  * absolute and packet timer values to this value
4734 		  * divided by 4 to get "simple timer" behavior.
4735 		  */
4736 
4737 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4738 		CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4739 	}
4740 
4741 	/* Set the VLAN ethernetype. */
4742 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4743 
4744 	/*
4745 	 * Set up the transmit control register; we start out with
4746 	 * a collision distance suitable for FDX, but update it whe
4747 	 * we resolve the media type.
4748 	 */
4749 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4750 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4751 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4752 	if (sc->sc_type >= WM_T_82571)
4753 		sc->sc_tctl |= TCTL_MULR;
4754 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4755 
4756 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4757 		/*
4758 		 * Write TDT after TCTL.EN is set.
4759 		 * See the document.
4760 		 */
4761 		CSR_WRITE(sc, WMREG_TDT, 0);
4762 	}
4763 
4764 	if (sc->sc_type == WM_T_80003) {
4765 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4766 		reg &= ~TCTL_EXT_GCEX_MASK;
4767 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4768 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4769 	}
4770 
4771 	/* Set the media. */
4772 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4773 		goto out;
4774 
4775 	/* Configure for OS presence */
4776 	wm_init_manageability(sc);
4777 
4778 	/*
4779 	 * Set up the receive control register; we actually program
4780 	 * the register when we set the receive filter.  Use multicast
4781 	 * address offset type 0.
4782 	 *
4783 	 * Only the i82544 has the ability to strip the incoming
4784 	 * CRC, so we don't enable that feature.
4785 	 */
4786 	sc->sc_mchash_type = 0;
4787 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4788 	    | RCTL_MO(sc->sc_mchash_type);
4789 
4790 	/*
4791 	 * The I350 has a bug where it always strips the CRC whether
4792 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
4793 	 */
4794 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4795 	    || (sc->sc_type == WM_T_I210))
4796 		sc->sc_rctl |= RCTL_SECRC;
4797 
4798 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4799 	    && (ifp->if_mtu > ETHERMTU)) {
4800 		sc->sc_rctl |= RCTL_LPE;
4801 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4802 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4803 	}
4804 
4805 	if (MCLBYTES == 2048) {
4806 		sc->sc_rctl |= RCTL_2k;
4807 	} else {
4808 		if (sc->sc_type >= WM_T_82543) {
4809 			switch (MCLBYTES) {
4810 			case 4096:
4811 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4812 				break;
4813 			case 8192:
4814 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4815 				break;
4816 			case 16384:
4817 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4818 				break;
4819 			default:
4820 				panic("wm_init: MCLBYTES %d unsupported",
4821 				    MCLBYTES);
4822 				break;
4823 			}
4824 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4825 	}
4826 
4827 	/* Set the receive filter. */
4828 	wm_set_filter(sc);
4829 
4830 	/* Enable ECC */
4831 	switch (sc->sc_type) {
4832 	case WM_T_82571:
4833 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4834 		reg |= PBA_ECC_CORR_EN;
4835 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4836 		break;
4837 	case WM_T_PCH_LPT:
4838 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4839 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4840 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4841 
4842 		reg = CSR_READ(sc, WMREG_CTRL);
4843 		reg |= CTRL_MEHE;
4844 		CSR_WRITE(sc, WMREG_CTRL, reg);
4845 		break;
4846 	default:
4847 		break;
4848 	}
4849 
4850 	/* On 575 and later set RDT only if RX enabled */
4851 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4852 		for (i = 0; i < WM_NRXDESC; i++)
4853 			WM_INIT_RXDESC(sc, i);
4854 
4855 	/* Start the one second link check clock. */
4856 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4857 
4858 	/* ...all done! */
4859 	ifp->if_flags |= IFF_RUNNING;
4860 	ifp->if_flags &= ~IFF_OACTIVE;
4861 
4862  out:
4863 	sc->sc_if_flags = ifp->if_flags;
4864 	if (error)
4865 		log(LOG_ERR, "%s: interface not running\n",
4866 		    device_xname(sc->sc_dev));
4867 	return error;
4868 }
4869 
4870 /*
4871  * wm_rxdrain:
4872  *
4873  *	Drain the receive queue.
4874  */
4875 static void
4876 wm_rxdrain(struct wm_softc *sc)
4877 {
4878 	struct wm_rxsoft *rxs;
4879 	int i;
4880 
4881 	for (i = 0; i < WM_NRXDESC; i++) {
4882 		rxs = &sc->sc_rxsoft[i];
4883 		if (rxs->rxs_mbuf != NULL) {
4884 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4885 			m_freem(rxs->rxs_mbuf);
4886 			rxs->rxs_mbuf = NULL;
4887 		}
4888 	}
4889 }
4890 
4891 /*
4892  * wm_stop:		[ifnet interface function]
4893  *
4894  *	Stop transmission on the interface.
4895  */
4896 static void
4897 wm_stop(struct ifnet *ifp, int disable)
4898 {
4899 	struct wm_softc *sc = ifp->if_softc;
4900 	struct wm_txsoft *txs;
4901 	int i;
4902 
4903 	/* Stop the one second clock. */
4904 	callout_stop(&sc->sc_tick_ch);
4905 
4906 	/* Stop the 82547 Tx FIFO stall check timer. */
4907 	if (sc->sc_type == WM_T_82547)
4908 		callout_stop(&sc->sc_txfifo_ch);
4909 
4910 	if (sc->sc_flags & WM_F_HAS_MII) {
4911 		/* Down the MII. */
4912 		mii_down(&sc->sc_mii);
4913 	} else {
4914 #if 0
4915 		/* Should we clear PHY's status properly? */
4916 		wm_reset(sc);
4917 #endif
4918 	}
4919 
4920 	/* Stop the transmit and receive processes. */
4921 	CSR_WRITE(sc, WMREG_TCTL, 0);
4922 	CSR_WRITE(sc, WMREG_RCTL, 0);
4923 	sc->sc_rctl &= ~RCTL_EN;
4924 
4925 	/*
4926 	 * Clear the interrupt mask to ensure the device cannot assert its
4927 	 * interrupt line.
4928 	 * Clear sc->sc_icr to ensure wm_intr() makes no attempt to service
4929 	 * any currently pending or shared interrupt.
4930 	 */
4931 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4932 	sc->sc_icr = 0;
4933 
4934 	/* Release any queued transmit buffers. */
4935 	for (i = 0; i < WM_TXQUEUELEN(sc); i++) {
4936 		txs = &sc->sc_txsoft[i];
4937 		if (txs->txs_mbuf != NULL) {
4938 			bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
4939 			m_freem(txs->txs_mbuf);
4940 			txs->txs_mbuf = NULL;
4941 		}
4942 	}
4943 
4944 	/* Mark the interface as down and cancel the watchdog timer. */
4945 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
4946 	ifp->if_timer = 0;
4947 
4948 	if (disable)
4949 		wm_rxdrain(sc);
4950 
4951 #if 0 /* notyet */
4952 	if (sc->sc_type >= WM_T_82544)
4953 		CSR_WRITE(sc, WMREG_WUC, 0);
4954 #endif
4955 }
4956 
4957 void
4958 wm_get_auto_rd_done(struct wm_softc *sc)
4959 {
4960 	int i;
4961 
4962 	/* wait for eeprom to reload */
4963 	switch (sc->sc_type) {
4964 	case WM_T_82571:
4965 	case WM_T_82572:
4966 	case WM_T_82573:
4967 	case WM_T_82574:
4968 	case WM_T_82583:
4969 	case WM_T_82575:
4970 	case WM_T_82576:
4971 	case WM_T_82580:
4972 	case WM_T_82580ER:
4973 	case WM_T_I350:
4974 	case WM_T_I354:
4975 	case WM_T_I210:
4976 	case WM_T_I211:
4977 	case WM_T_80003:
4978 	case WM_T_ICH8:
4979 	case WM_T_ICH9:
4980 		for (i = 0; i < 10; i++) {
4981 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
4982 				break;
4983 			delay(1000);
4984 		}
4985 		if (i == 10) {
4986 			log(LOG_ERR, "%s: auto read from eeprom failed to "
4987 			    "complete\n", device_xname(sc->sc_dev));
4988 		}
4989 		break;
4990 	default:
4991 		break;
4992 	}
4993 }
4994 
4995 void
4996 wm_lan_init_done(struct wm_softc *sc)
4997 {
4998 	uint32_t reg = 0;
4999 	int i;
5000 
5001 	/* wait for eeprom to reload */
5002 	switch (sc->sc_type) {
5003 	case WM_T_ICH10:
5004 	case WM_T_PCH:
5005 	case WM_T_PCH2:
5006 	case WM_T_PCH_LPT:
5007 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
5008 			reg = CSR_READ(sc, WMREG_STATUS);
5009 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
5010 				break;
5011 			delay(100);
5012 		}
5013 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
5014 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
5015 			    "complete\n", device_xname(sc->sc_dev), __func__);
5016 		}
5017 		break;
5018 	default:
5019 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5020 		    __func__);
5021 		break;
5022 	}
5023 
5024 	reg &= ~STATUS_LAN_INIT_DONE;
5025 	CSR_WRITE(sc, WMREG_STATUS, reg);
5026 }
5027 
5028 void
5029 wm_get_cfg_done(struct wm_softc *sc)
5030 {
5031 	int mask;
5032 	uint32_t reg;
5033 	int i;
5034 
5035 	/* wait for eeprom to reload */
5036 	switch (sc->sc_type) {
5037 	case WM_T_82542_2_0:
5038 	case WM_T_82542_2_1:
5039 		/* null */
5040 		break;
5041 	case WM_T_82543:
5042 	case WM_T_82544:
5043 	case WM_T_82540:
5044 	case WM_T_82545:
5045 	case WM_T_82545_3:
5046 	case WM_T_82546:
5047 	case WM_T_82546_3:
5048 	case WM_T_82541:
5049 	case WM_T_82541_2:
5050 	case WM_T_82547:
5051 	case WM_T_82547_2:
5052 	case WM_T_82573:
5053 	case WM_T_82574:
5054 	case WM_T_82583:
5055 		/* generic */
5056 		delay(10*1000);
5057 		break;
5058 	case WM_T_80003:
5059 	case WM_T_82571:
5060 	case WM_T_82572:
5061 	case WM_T_82575:
5062 	case WM_T_82576:
5063 	case WM_T_82580:
5064 	case WM_T_82580ER:
5065 	case WM_T_I350:
5066 	case WM_T_I354:
5067 	case WM_T_I210:
5068 	case WM_T_I211:
5069 		if (sc->sc_type == WM_T_82571) {
5070 			/* Only 82571 shares port 0 */
5071 			mask = EEMNGCTL_CFGDONE_0;
5072 		} else
5073 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
5074 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
5075 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
5076 				break;
5077 			delay(1000);
5078 		}
5079 		if (i >= WM_PHY_CFG_TIMEOUT) {
5080 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
5081 				device_xname(sc->sc_dev), __func__));
5082 		}
5083 		break;
5084 	case WM_T_ICH8:
5085 	case WM_T_ICH9:
5086 	case WM_T_ICH10:
5087 	case WM_T_PCH:
5088 	case WM_T_PCH2:
5089 	case WM_T_PCH_LPT:
5090 		delay(10*1000);
5091 		if (sc->sc_type >= WM_T_ICH10)
5092 			wm_lan_init_done(sc);
5093 		else
5094 			wm_get_auto_rd_done(sc);
5095 
5096 		reg = CSR_READ(sc, WMREG_STATUS);
5097 		if ((reg & STATUS_PHYRA) != 0)
5098 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
5099 		break;
5100 	default:
5101 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
5102 		    __func__);
5103 		break;
5104 	}
5105 }
5106 
5107 /*
5108  * wm_acquire_eeprom:
5109  *
5110  *	Perform the EEPROM handshake required on some chips.
5111  */
5112 static int
5113 wm_acquire_eeprom(struct wm_softc *sc)
5114 {
5115 	uint32_t reg;
5116 	int x;
5117 	int ret = 0;
5118 
5119 	/* always success */
5120 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5121 		return 0;
5122 
5123 	if (sc->sc_flags & WM_F_SWFWHW_SYNC) {
5124 		ret = wm_get_swfwhw_semaphore(sc);
5125 	} else if (sc->sc_flags & WM_F_SWFW_SYNC) {
5126 		/* this will also do wm_get_swsm_semaphore() if needed */
5127 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
5128 	} else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
5129 		ret = wm_get_swsm_semaphore(sc);
5130 	}
5131 
5132 	if (ret) {
5133 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
5134 			__func__);
5135 		return 1;
5136 	}
5137 
5138 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5139 		reg = CSR_READ(sc, WMREG_EECD);
5140 
5141 		/* Request EEPROM access. */
5142 		reg |= EECD_EE_REQ;
5143 		CSR_WRITE(sc, WMREG_EECD, reg);
5144 
5145 		/* ..and wait for it to be granted. */
5146 		for (x = 0; x < 1000; x++) {
5147 			reg = CSR_READ(sc, WMREG_EECD);
5148 			if (reg & EECD_EE_GNT)
5149 				break;
5150 			delay(5);
5151 		}
5152 		if ((reg & EECD_EE_GNT) == 0) {
5153 			aprint_error_dev(sc->sc_dev,
5154 			    "could not acquire EEPROM GNT\n");
5155 			reg &= ~EECD_EE_REQ;
5156 			CSR_WRITE(sc, WMREG_EECD, reg);
5157 			if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5158 				wm_put_swfwhw_semaphore(sc);
5159 			if (sc->sc_flags & WM_F_SWFW_SYNC)
5160 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5161 			else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5162 				wm_put_swsm_semaphore(sc);
5163 			return 1;
5164 		}
5165 	}
5166 
5167 	return 0;
5168 }
5169 
5170 /*
5171  * wm_release_eeprom:
5172  *
5173  *	Release the EEPROM mutex.
5174  */
5175 static void
5176 wm_release_eeprom(struct wm_softc *sc)
5177 {
5178 	uint32_t reg;
5179 
5180 	/* always success */
5181 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
5182 		return;
5183 
5184 	if (sc->sc_flags & WM_F_EEPROM_HANDSHAKE) {
5185 		reg = CSR_READ(sc, WMREG_EECD);
5186 		reg &= ~EECD_EE_REQ;
5187 		CSR_WRITE(sc, WMREG_EECD, reg);
5188 	}
5189 
5190 	if (sc->sc_flags & WM_F_SWFWHW_SYNC)
5191 		wm_put_swfwhw_semaphore(sc);
5192 	if (sc->sc_flags & WM_F_SWFW_SYNC)
5193 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
5194 	else if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
5195 		wm_put_swsm_semaphore(sc);
5196 }
5197 
5198 /*
5199  * wm_eeprom_sendbits:
5200  *
5201  *	Send a series of bits to the EEPROM.
5202  */
5203 static void
5204 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
5205 {
5206 	uint32_t reg;
5207 	int x;
5208 
5209 	reg = CSR_READ(sc, WMREG_EECD);
5210 
5211 	for (x = nbits; x > 0; x--) {
5212 		if (bits & (1U << (x - 1)))
5213 			reg |= EECD_DI;
5214 		else
5215 			reg &= ~EECD_DI;
5216 		CSR_WRITE(sc, WMREG_EECD, reg);
5217 		CSR_WRITE_FLUSH(sc);
5218 		delay(2);
5219 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5220 		CSR_WRITE_FLUSH(sc);
5221 		delay(2);
5222 		CSR_WRITE(sc, WMREG_EECD, reg);
5223 		CSR_WRITE_FLUSH(sc);
5224 		delay(2);
5225 	}
5226 }
5227 
5228 /*
5229  * wm_eeprom_recvbits:
5230  *
5231  *	Receive a series of bits from the EEPROM.
5232  */
5233 static void
5234 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
5235 {
5236 	uint32_t reg, val;
5237 	int x;
5238 
5239 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
5240 
5241 	val = 0;
5242 	for (x = nbits; x > 0; x--) {
5243 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
5244 		CSR_WRITE_FLUSH(sc);
5245 		delay(2);
5246 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
5247 			val |= (1U << (x - 1));
5248 		CSR_WRITE(sc, WMREG_EECD, reg);
5249 		CSR_WRITE_FLUSH(sc);
5250 		delay(2);
5251 	}
5252 	*valp = val;
5253 }
5254 
5255 /*
5256  * wm_read_eeprom_uwire:
5257  *
5258  *	Read a word from the EEPROM using the MicroWire protocol.
5259  */
5260 static int
5261 wm_read_eeprom_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5262 {
5263 	uint32_t reg, val;
5264 	int i;
5265 
5266 	for (i = 0; i < wordcnt; i++) {
5267 		/* Clear SK and DI. */
5268 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
5269 		CSR_WRITE(sc, WMREG_EECD, reg);
5270 
5271 		/*
5272 		 * XXX: workaround for a bug in qemu-0.12.x and prior
5273 		 * and Xen.
5274 		 *
5275 		 * We use this workaround only for 82540 because qemu's
5276 		 * e1000 act as 82540.
5277 		 */
5278 		if (sc->sc_type == WM_T_82540) {
5279 			reg |= EECD_SK;
5280 			CSR_WRITE(sc, WMREG_EECD, reg);
5281 			reg &= ~EECD_SK;
5282 			CSR_WRITE(sc, WMREG_EECD, reg);
5283 			CSR_WRITE_FLUSH(sc);
5284 			delay(2);
5285 		}
5286 		/* XXX: end of workaround */
5287 
5288 		/* Set CHIP SELECT. */
5289 		reg |= EECD_CS;
5290 		CSR_WRITE(sc, WMREG_EECD, reg);
5291 		CSR_WRITE_FLUSH(sc);
5292 		delay(2);
5293 
5294 		/* Shift in the READ command. */
5295 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
5296 
5297 		/* Shift in address. */
5298 		wm_eeprom_sendbits(sc, word + i, sc->sc_ee_addrbits);
5299 
5300 		/* Shift out the data. */
5301 		wm_eeprom_recvbits(sc, &val, 16);
5302 		data[i] = val & 0xffff;
5303 
5304 		/* Clear CHIP SELECT. */
5305 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
5306 		CSR_WRITE(sc, WMREG_EECD, reg);
5307 		CSR_WRITE_FLUSH(sc);
5308 		delay(2);
5309 	}
5310 
5311 	return 0;
5312 }
5313 
5314 /*
5315  * wm_spi_eeprom_ready:
5316  *
5317  *	Wait for a SPI EEPROM to be ready for commands.
5318  */
5319 static int
5320 wm_spi_eeprom_ready(struct wm_softc *sc)
5321 {
5322 	uint32_t val;
5323 	int usec;
5324 
5325 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
5326 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
5327 		wm_eeprom_recvbits(sc, &val, 8);
5328 		if ((val & SPI_SR_RDY) == 0)
5329 			break;
5330 	}
5331 	if (usec >= SPI_MAX_RETRIES) {
5332 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
5333 		return 1;
5334 	}
5335 	return 0;
5336 }
5337 
5338 /*
5339  * wm_read_eeprom_spi:
5340  *
5341  *	Read a work from the EEPROM using the SPI protocol.
5342  */
5343 static int
5344 wm_read_eeprom_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5345 {
5346 	uint32_t reg, val;
5347 	int i;
5348 	uint8_t opc;
5349 
5350 	/* Clear SK and CS. */
5351 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
5352 	CSR_WRITE(sc, WMREG_EECD, reg);
5353 	CSR_WRITE_FLUSH(sc);
5354 	delay(2);
5355 
5356 	if (wm_spi_eeprom_ready(sc))
5357 		return 1;
5358 
5359 	/* Toggle CS to flush commands. */
5360 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
5361 	CSR_WRITE_FLUSH(sc);
5362 	delay(2);
5363 	CSR_WRITE(sc, WMREG_EECD, reg);
5364 	CSR_WRITE_FLUSH(sc);
5365 	delay(2);
5366 
5367 	opc = SPI_OPC_READ;
5368 	if (sc->sc_ee_addrbits == 8 && word >= 128)
5369 		opc |= SPI_OPC_A8;
5370 
5371 	wm_eeprom_sendbits(sc, opc, 8);
5372 	wm_eeprom_sendbits(sc, word << 1, sc->sc_ee_addrbits);
5373 
5374 	for (i = 0; i < wordcnt; i++) {
5375 		wm_eeprom_recvbits(sc, &val, 16);
5376 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
5377 	}
5378 
5379 	/* Raise CS and clear SK. */
5380 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
5381 	CSR_WRITE(sc, WMREG_EECD, reg);
5382 	CSR_WRITE_FLUSH(sc);
5383 	delay(2);
5384 
5385 	return 0;
5386 }
5387 
5388 #define NVM_CHECKSUM			0xBABA
5389 #define EEPROM_SIZE			0x0040
5390 #define NVM_COMPAT			0x0003
5391 #define NVM_COMPAT_VALID_CHECKSUM	0x0001
5392 #define NVM_FUTURE_INIT_WORD1			0x0019
5393 #define NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM	0x0040
5394 
5395 /*
5396  * wm_validate_eeprom_checksum
5397  *
5398  * The checksum is defined as the sum of the first 64 (16 bit) words.
5399  */
5400 static int
5401 wm_validate_eeprom_checksum(struct wm_softc *sc)
5402 {
5403 	uint16_t checksum;
5404 	uint16_t eeprom_data;
5405 #ifdef WM_DEBUG
5406 	uint16_t csum_wordaddr, valid_checksum;
5407 #endif
5408 	int i;
5409 
5410 	checksum = 0;
5411 
5412 	/* Don't check for I211 */
5413 	if (sc->sc_type == WM_T_I211)
5414 		return 0;
5415 
5416 #ifdef WM_DEBUG
5417 	if (sc->sc_type == WM_T_PCH_LPT) {
5418 		csum_wordaddr = NVM_COMPAT;
5419 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
5420 	} else {
5421 		csum_wordaddr = NVM_FUTURE_INIT_WORD1;
5422 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
5423 	}
5424 
5425 	/* Dump EEPROM image for debug */
5426 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5427 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5428 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5429 		wm_read_eeprom(sc, csum_wordaddr, 1, &eeprom_data);
5430 		if ((eeprom_data & valid_checksum) == 0) {
5431 			DPRINTF(WM_DEBUG_NVM,
5432 			    ("%s: NVM need to be updated (%04x != %04x)\n",
5433 				device_xname(sc->sc_dev), eeprom_data,
5434 				    valid_checksum));
5435 		}
5436 	}
5437 
5438 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
5439 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
5440 		for (i = 0; i < EEPROM_SIZE; i++) {
5441 			if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5442 				printf("XX ");
5443 			else
5444 				printf("%04x ", eeprom_data);
5445 			if (i % 8 == 7)
5446 				printf("\n");
5447 		}
5448 	}
5449 
5450 #endif /* WM_DEBUG */
5451 
5452 	for (i = 0; i < EEPROM_SIZE; i++) {
5453 		if (wm_read_eeprom(sc, i, 1, &eeprom_data))
5454 			return 1;
5455 		checksum += eeprom_data;
5456 	}
5457 
5458 	if (checksum != (uint16_t) NVM_CHECKSUM) {
5459 #ifdef WM_DEBUG
5460 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
5461 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
5462 #endif
5463 	}
5464 
5465 	return 0;
5466 }
5467 
5468 /*
5469  * wm_read_eeprom:
5470  *
5471  *	Read data from the serial EEPROM.
5472  */
5473 static int
5474 wm_read_eeprom(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
5475 {
5476 	int rv;
5477 
5478 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
5479 		return 1;
5480 
5481 	if (wm_acquire_eeprom(sc))
5482 		return 1;
5483 
5484 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5485 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5486 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5487 		rv = wm_read_eeprom_ich8(sc, word, wordcnt, data);
5488 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
5489 		rv = wm_read_eeprom_eerd(sc, word, wordcnt, data);
5490 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
5491 		rv = wm_read_eeprom_spi(sc, word, wordcnt, data);
5492 	else
5493 		rv = wm_read_eeprom_uwire(sc, word, wordcnt, data);
5494 
5495 	wm_release_eeprom(sc);
5496 	return rv;
5497 }
5498 
5499 static int
5500 wm_read_eeprom_eerd(struct wm_softc *sc, int offset, int wordcnt,
5501     uint16_t *data)
5502 {
5503 	int i, eerd = 0;
5504 	int error = 0;
5505 
5506 	for (i = 0; i < wordcnt; i++) {
5507 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
5508 
5509 		CSR_WRITE(sc, WMREG_EERD, eerd);
5510 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
5511 		if (error != 0)
5512 			break;
5513 
5514 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
5515 	}
5516 
5517 	return error;
5518 }
5519 
5520 static int
5521 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
5522 {
5523 	uint32_t attempts = 100000;
5524 	uint32_t i, reg = 0;
5525 	int32_t done = -1;
5526 
5527 	for (i = 0; i < attempts; i++) {
5528 		reg = CSR_READ(sc, rw);
5529 
5530 		if (reg & EERD_DONE) {
5531 			done = 0;
5532 			break;
5533 		}
5534 		delay(5);
5535 	}
5536 
5537 	return done;
5538 }
5539 
5540 static int
5541 wm_check_alt_mac_addr(struct wm_softc *sc)
5542 {
5543 	uint16_t myea[ETHER_ADDR_LEN / 2];
5544 	uint16_t offset = EEPROM_OFF_MACADDR;
5545 
5546 	/* Try to read alternative MAC address pointer */
5547 	if (wm_read_eeprom(sc, EEPROM_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
5548 		return -1;
5549 
5550 	/* Check pointer */
5551 	if (offset == 0xffff)
5552 		return -1;
5553 
5554 	/*
5555 	 * Check whether alternative MAC address is valid or not.
5556 	 * Some cards have non 0xffff pointer but those don't use
5557 	 * alternative MAC address in reality.
5558 	 *
5559 	 * Check whether the broadcast bit is set or not.
5560 	 */
5561 	if (wm_read_eeprom(sc, offset, 1, myea) == 0)
5562 		if (((myea[0] & 0xff) & 0x01) == 0)
5563 			return 0; /* found! */
5564 
5565 	/* not found */
5566 	return -1;
5567 }
5568 
5569 static int
5570 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
5571 {
5572 	uint16_t myea[ETHER_ADDR_LEN / 2];
5573 	uint16_t offset = EEPROM_OFF_MACADDR;
5574 	int do_invert = 0;
5575 
5576 	switch (sc->sc_type) {
5577 	case WM_T_82580:
5578 	case WM_T_82580ER:
5579 	case WM_T_I350:
5580 	case WM_T_I354:
5581 		switch (sc->sc_funcid) {
5582 		case 0:
5583 			/* default value (== EEPROM_OFF_MACADDR) */
5584 			break;
5585 		case 1:
5586 			offset = EEPROM_OFF_LAN1;
5587 			break;
5588 		case 2:
5589 			offset = EEPROM_OFF_LAN2;
5590 			break;
5591 		case 3:
5592 			offset = EEPROM_OFF_LAN3;
5593 			break;
5594 		default:
5595 			goto bad;
5596 			/* NOTREACHED */
5597 			break;
5598 		}
5599 		break;
5600 	case WM_T_82571:
5601 	case WM_T_82575:
5602 	case WM_T_82576:
5603 	case WM_T_80003:
5604 	case WM_T_I210:
5605 	case WM_T_I211:
5606 		if (wm_check_alt_mac_addr(sc) != 0) {
5607 			/* reset the offset to LAN0 */
5608 			offset = EEPROM_OFF_MACADDR;
5609 			if ((sc->sc_funcid & 0x01) == 1)
5610 				do_invert = 1;
5611 			goto do_read;
5612 		}
5613 		switch (sc->sc_funcid) {
5614 		case 0:
5615 			/*
5616 			 * The offset is the value in EEPROM_ALT_MAC_ADDR_PTR
5617 			 * itself.
5618 			 */
5619 			break;
5620 		case 1:
5621 			offset += EEPROM_OFF_MACADDR_LAN1;
5622 			break;
5623 		case 2:
5624 			offset += EEPROM_OFF_MACADDR_LAN2;
5625 			break;
5626 		case 3:
5627 			offset += EEPROM_OFF_MACADDR_LAN3;
5628 			break;
5629 		default:
5630 			goto bad;
5631 			/* NOTREACHED */
5632 			break;
5633 		}
5634 		break;
5635 	default:
5636 		if ((sc->sc_funcid & 0x01) == 1)
5637 			do_invert = 1;
5638 		break;
5639 	}
5640 
5641  do_read:
5642 	if (wm_read_eeprom(sc, offset, sizeof(myea) / sizeof(myea[0]),
5643 		myea) != 0) {
5644 		goto bad;
5645 	}
5646 
5647 	enaddr[0] = myea[0] & 0xff;
5648 	enaddr[1] = myea[0] >> 8;
5649 	enaddr[2] = myea[1] & 0xff;
5650 	enaddr[3] = myea[1] >> 8;
5651 	enaddr[4] = myea[2] & 0xff;
5652 	enaddr[5] = myea[2] >> 8;
5653 
5654 	/*
5655 	 * Toggle the LSB of the MAC address on the second port
5656 	 * of some dual port cards.
5657 	 */
5658 	if (do_invert != 0)
5659 		enaddr[5] ^= 1;
5660 
5661 	return 0;
5662 
5663  bad:
5664 	aprint_error_dev(sc->sc_dev, "unable to read Ethernet address\n");
5665 
5666 	return -1;
5667 }
5668 
5669 /*
5670  * wm_add_rxbuf:
5671  *
5672  *	Add a receive buffer to the indiciated descriptor.
5673  */
5674 static int
5675 wm_add_rxbuf(struct wm_softc *sc, int idx)
5676 {
5677 	struct wm_rxsoft *rxs = &sc->sc_rxsoft[idx];
5678 	struct mbuf *m;
5679 	int error;
5680 
5681 	MGETHDR(m, M_DONTWAIT, MT_DATA);
5682 	if (m == NULL)
5683 		return ENOBUFS;
5684 
5685 	MCLGET(m, M_DONTWAIT);
5686 	if ((m->m_flags & M_EXT) == 0) {
5687 		m_freem(m);
5688 		return ENOBUFS;
5689 	}
5690 
5691 	if (rxs->rxs_mbuf != NULL)
5692 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
5693 
5694 	rxs->rxs_mbuf = m;
5695 
5696 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
5697 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
5698 	    BUS_DMA_READ|BUS_DMA_NOWAIT);
5699 	if (error) {
5700 		/* XXX XXX XXX */
5701 		aprint_error_dev(sc->sc_dev,
5702 		    "unable to load rx DMA map %d, error = %d\n",
5703 		    idx, error);
5704 		panic("wm_add_rxbuf");
5705 	}
5706 
5707 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
5708 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
5709 
5710 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5711 		if ((sc->sc_rctl & RCTL_EN) != 0)
5712 			WM_INIT_RXDESC(sc, idx);
5713 	} else
5714 		WM_INIT_RXDESC(sc, idx);
5715 
5716 	return 0;
5717 }
5718 
5719 /*
5720  * wm_set_ral:
5721  *
5722  *	Set an entery in the receive address list.
5723  */
5724 static void
5725 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
5726 {
5727 	uint32_t ral_lo, ral_hi;
5728 
5729 	if (enaddr != NULL) {
5730 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
5731 		    (enaddr[3] << 24);
5732 		ral_hi = enaddr[4] | (enaddr[5] << 8);
5733 		ral_hi |= RAL_AV;
5734 	} else {
5735 		ral_lo = 0;
5736 		ral_hi = 0;
5737 	}
5738 
5739 	if (sc->sc_type >= WM_T_82544) {
5740 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
5741 		    ral_lo);
5742 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
5743 		    ral_hi);
5744 	} else {
5745 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
5746 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
5747 	}
5748 }
5749 
5750 /*
5751  * wm_mchash:
5752  *
5753  *	Compute the hash of the multicast address for the 4096-bit
5754  *	multicast filter.
5755  */
5756 static uint32_t
5757 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
5758 {
5759 	static const int lo_shift[4] = { 4, 3, 2, 0 };
5760 	static const int hi_shift[4] = { 4, 5, 6, 8 };
5761 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
5762 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
5763 	uint32_t hash;
5764 
5765 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5766 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5767 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
5768 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
5769 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
5770 		return (hash & 0x3ff);
5771 	}
5772 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
5773 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
5774 
5775 	return (hash & 0xfff);
5776 }
5777 
5778 /*
5779  * wm_set_filter:
5780  *
5781  *	Set up the receive filter.
5782  */
5783 static void
5784 wm_set_filter(struct wm_softc *sc)
5785 {
5786 	struct ethercom *ec = &sc->sc_ethercom;
5787 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5788 	struct ether_multi *enm;
5789 	struct ether_multistep step;
5790 	bus_addr_t mta_reg;
5791 	uint32_t hash, reg, bit;
5792 	int i, size;
5793 
5794 	if (sc->sc_type >= WM_T_82544)
5795 		mta_reg = WMREG_CORDOVA_MTA;
5796 	else
5797 		mta_reg = WMREG_MTA;
5798 
5799 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
5800 
5801 	if (ifp->if_flags & IFF_BROADCAST)
5802 		sc->sc_rctl |= RCTL_BAM;
5803 	if (ifp->if_flags & IFF_PROMISC) {
5804 		sc->sc_rctl |= RCTL_UPE;
5805 		goto allmulti;
5806 	}
5807 
5808 	/*
5809 	 * Set the station address in the first RAL slot, and
5810 	 * clear the remaining slots.
5811 	 */
5812 	if (sc->sc_type == WM_T_ICH8)
5813 		size = WM_RAL_TABSIZE_ICH8 -1;
5814 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
5815 	    || (sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
5816 	    || (sc->sc_type == WM_T_PCH_LPT))
5817 		size = WM_RAL_TABSIZE_ICH8;
5818 	else if (sc->sc_type == WM_T_82575)
5819 		size = WM_RAL_TABSIZE_82575;
5820 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
5821 		size = WM_RAL_TABSIZE_82576;
5822 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5823 		size = WM_RAL_TABSIZE_I350;
5824 	else
5825 		size = WM_RAL_TABSIZE;
5826 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
5827 	for (i = 1; i < size; i++)
5828 		wm_set_ral(sc, NULL, i);
5829 
5830 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5831 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5832 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
5833 		size = WM_ICH8_MC_TABSIZE;
5834 	else
5835 		size = WM_MC_TABSIZE;
5836 	/* Clear out the multicast table. */
5837 	for (i = 0; i < size; i++)
5838 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
5839 
5840 	ETHER_FIRST_MULTI(step, ec, enm);
5841 	while (enm != NULL) {
5842 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
5843 			/*
5844 			 * We must listen to a range of multicast addresses.
5845 			 * For now, just accept all multicasts, rather than
5846 			 * trying to set only those filter bits needed to match
5847 			 * the range.  (At this time, the only use of address
5848 			 * ranges is for IP multicast routing, for which the
5849 			 * range is big enough to require all bits set.)
5850 			 */
5851 			goto allmulti;
5852 		}
5853 
5854 		hash = wm_mchash(sc, enm->enm_addrlo);
5855 
5856 		reg = (hash >> 5);
5857 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5858 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5859 		    || (sc->sc_type == WM_T_PCH2)
5860 		    || (sc->sc_type == WM_T_PCH_LPT))
5861 			reg &= 0x1f;
5862 		else
5863 			reg &= 0x7f;
5864 		bit = hash & 0x1f;
5865 
5866 		hash = CSR_READ(sc, mta_reg + (reg << 2));
5867 		hash |= 1U << bit;
5868 
5869 		/* XXX Hardware bug?? */
5870 		if (sc->sc_type == WM_T_82544 && (reg & 0xe) == 1) {
5871 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
5872 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5873 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
5874 		} else
5875 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
5876 
5877 		ETHER_NEXT_MULTI(step, enm);
5878 	}
5879 
5880 	ifp->if_flags &= ~IFF_ALLMULTI;
5881 	goto setit;
5882 
5883  allmulti:
5884 	ifp->if_flags |= IFF_ALLMULTI;
5885 	sc->sc_rctl |= RCTL_MPE;
5886 
5887  setit:
5888 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
5889 }
5890 
5891 /*
5892  * wm_tbi_mediainit:
5893  *
5894  *	Initialize media for use on 1000BASE-X devices.
5895  */
5896 static void
5897 wm_tbi_mediainit(struct wm_softc *sc)
5898 {
5899 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
5900 	const char *sep = "";
5901 
5902 	if (sc->sc_type < WM_T_82543)
5903 		sc->sc_tipg = TIPG_WM_DFLT;
5904 	else
5905 		sc->sc_tipg = TIPG_LG_DFLT;
5906 
5907 	sc->sc_tbi_anegticks = 5;
5908 
5909 	/* Initialize our media structures */
5910 	sc->sc_mii.mii_ifp = ifp;
5911 
5912 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
5913 	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, wm_tbi_mediachange,
5914 	    wm_tbi_mediastatus);
5915 
5916 	/*
5917 	 * SWD Pins:
5918 	 *
5919 	 *	0 = Link LED (output)
5920 	 *	1 = Loss Of Signal (input)
5921 	 */
5922 	sc->sc_ctrl |= CTRL_SWDPIO(0);
5923 	sc->sc_ctrl &= ~CTRL_SWDPIO(1);
5924 
5925 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5926 
5927 #define	ADD(ss, mm, dd)							\
5928 do {									\
5929 	aprint_normal("%s%s", sep, ss);					\
5930 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|(mm), (dd), NULL);	\
5931 	sep = ", ";							\
5932 } while (/*CONSTCOND*/0)
5933 
5934 	aprint_normal_dev(sc->sc_dev, "");
5935 	ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
5936 	ADD("1000baseSX-FDX", IFM_1000_SX|IFM_FDX, ANAR_X_FD);
5937 	ADD("auto", IFM_AUTO, ANAR_X_FD|ANAR_X_HD);
5938 	aprint_normal("\n");
5939 
5940 #undef ADD
5941 
5942 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
5943 }
5944 
5945 /*
5946  * wm_tbi_mediastatus:	[ifmedia interface function]
5947  *
5948  *	Get the current interface media status on a 1000BASE-X device.
5949  */
5950 static void
5951 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
5952 {
5953 	struct wm_softc *sc = ifp->if_softc;
5954 	uint32_t ctrl, status;
5955 
5956 	ifmr->ifm_status = IFM_AVALID;
5957 	ifmr->ifm_active = IFM_ETHER;
5958 
5959 	status = CSR_READ(sc, WMREG_STATUS);
5960 	if ((status & STATUS_LU) == 0) {
5961 		ifmr->ifm_active |= IFM_NONE;
5962 		return;
5963 	}
5964 
5965 	ifmr->ifm_status |= IFM_ACTIVE;
5966 	ifmr->ifm_active |= IFM_1000_SX;
5967 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
5968 		ifmr->ifm_active |= IFM_FDX;
5969 	ctrl = CSR_READ(sc, WMREG_CTRL);
5970 	if (ctrl & CTRL_RFCE)
5971 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
5972 	if (ctrl & CTRL_TFCE)
5973 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
5974 }
5975 
5976 /*
5977  * wm_tbi_mediachange:	[ifmedia interface function]
5978  *
5979  *	Set hardware to newly-selected media on a 1000BASE-X device.
5980  */
5981 static int
5982 wm_tbi_mediachange(struct ifnet *ifp)
5983 {
5984 	struct wm_softc *sc = ifp->if_softc;
5985 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
5986 	uint32_t status;
5987 	int i;
5988 
5989 	sc->sc_txcw = 0;
5990 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO ||
5991 	    (sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
5992 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
5993 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
5994 		sc->sc_txcw |= TXCW_ANE;
5995 	} else {
5996 		/*
5997 		 * If autonegotiation is turned off, force link up and turn on
5998 		 * full duplex
5999 		 */
6000 		sc->sc_txcw &= ~TXCW_ANE;
6001 		sc->sc_ctrl |= CTRL_SLU | CTRL_FD;
6002 		sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
6003 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6004 		CSR_WRITE_FLUSH(sc);
6005 		delay(1000);
6006 	}
6007 
6008 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
6009 		    device_xname(sc->sc_dev),sc->sc_txcw));
6010 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6011 	CSR_WRITE_FLUSH(sc);
6012 	delay(10000);
6013 
6014 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
6015 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
6016 
6017 	/*
6018 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
6019 	 * optics detect a signal, 0 if they don't.
6020 	 */
6021 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
6022 		/* Have signal; wait for the link to come up. */
6023 
6024 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6025 			/*
6026 			 * Reset the link, and let autonegotiation do its thing
6027 			 */
6028 			sc->sc_ctrl |= CTRL_LRST;
6029 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6030 			CSR_WRITE_FLUSH(sc);
6031 			delay(1000);
6032 			sc->sc_ctrl &= ~CTRL_LRST;
6033 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6034 			CSR_WRITE_FLUSH(sc);
6035 			delay(1000);
6036 		}
6037 
6038 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
6039 			delay(10000);
6040 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
6041 				break;
6042 		}
6043 
6044 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
6045 			    device_xname(sc->sc_dev),i));
6046 
6047 		status = CSR_READ(sc, WMREG_STATUS);
6048 		DPRINTF(WM_DEBUG_LINK,
6049 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
6050 			device_xname(sc->sc_dev),status, STATUS_LU));
6051 		if (status & STATUS_LU) {
6052 			/* Link is up. */
6053 			DPRINTF(WM_DEBUG_LINK,
6054 			    ("%s: LINK: set media -> link up %s\n",
6055 			    device_xname(sc->sc_dev),
6056 			    (status & STATUS_FD) ? "FDX" : "HDX"));
6057 
6058 			/*
6059 			 * NOTE: CTRL will update TFCE and RFCE automatically,
6060 			 * so we should update sc->sc_ctrl
6061 			 */
6062 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
6063 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
6064 			sc->sc_fcrtl &= ~FCRTL_XONE;
6065 			if (status & STATUS_FD)
6066 				sc->sc_tctl |=
6067 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
6068 			else
6069 				sc->sc_tctl |=
6070 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
6071 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
6072 				sc->sc_fcrtl |= FCRTL_XONE;
6073 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
6074 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
6075 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
6076 				      sc->sc_fcrtl);
6077 			sc->sc_tbi_linkup = 1;
6078 		} else {
6079 			if (i == WM_LINKUP_TIMEOUT)
6080 				wm_check_for_link(sc);
6081 			/* Link is down. */
6082 			DPRINTF(WM_DEBUG_LINK,
6083 			    ("%s: LINK: set media -> link down\n",
6084 			    device_xname(sc->sc_dev)));
6085 			sc->sc_tbi_linkup = 0;
6086 		}
6087 	} else {
6088 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
6089 		    device_xname(sc->sc_dev)));
6090 		sc->sc_tbi_linkup = 0;
6091 	}
6092 
6093 	wm_tbi_set_linkled(sc);
6094 
6095 	return 0;
6096 }
6097 
6098 /*
6099  * wm_tbi_set_linkled:
6100  *
6101  *	Update the link LED on 1000BASE-X devices.
6102  */
6103 static void
6104 wm_tbi_set_linkled(struct wm_softc *sc)
6105 {
6106 
6107 	if (sc->sc_tbi_linkup)
6108 		sc->sc_ctrl |= CTRL_SWDPIN(0);
6109 	else
6110 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
6111 
6112 	/* 82540 or newer devices are active low */
6113 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
6114 
6115 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6116 }
6117 
6118 /*
6119  * wm_tbi_check_link:
6120  *
6121  *	Check the link on 1000BASE-X devices.
6122  */
6123 static void
6124 wm_tbi_check_link(struct wm_softc *sc)
6125 {
6126 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6127 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6128 	uint32_t status;
6129 
6130 	status = CSR_READ(sc, WMREG_STATUS);
6131 
6132 	/* XXX is this needed? */
6133 	(void)CSR_READ(sc, WMREG_RXCW);
6134 	(void)CSR_READ(sc, WMREG_CTRL);
6135 
6136 	/* set link status */
6137 	if ((status & STATUS_LU) == 0) {
6138 		DPRINTF(WM_DEBUG_LINK,
6139 		    ("%s: LINK: checklink -> down\n", device_xname(sc->sc_dev)));
6140 		sc->sc_tbi_linkup = 0;
6141 	} else if (sc->sc_tbi_linkup == 0) {
6142 		DPRINTF(WM_DEBUG_LINK,
6143 		    ("%s: LINK: checklink -> up %s\n", device_xname(sc->sc_dev),
6144 		    (status & STATUS_FD) ? "FDX" : "HDX"));
6145 		sc->sc_tbi_linkup = 1;
6146 	}
6147 
6148 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP)
6149 	    && ((status & STATUS_LU) == 0)) {
6150 		sc->sc_tbi_linkup = 0;
6151 		if (sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg > 100) {
6152 			/* RXCFG storm! */
6153 			DPRINTF(WM_DEBUG_LINK, ("RXCFG storm! (%d)\n",
6154 				sc->sc_tbi_nrxcfg - sc->sc_tbi_lastnrxcfg));
6155 			wm_init(ifp);
6156 			ifp->if_start(ifp);
6157 		} else if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
6158 			/* If the timer expired, retry autonegotiation */
6159 			if (++sc->sc_tbi_ticks >= sc->sc_tbi_anegticks) {
6160 				DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
6161 				sc->sc_tbi_ticks = 0;
6162 				/*
6163 				 * Reset the link, and let autonegotiation do
6164 				 * its thing
6165 				 */
6166 				sc->sc_ctrl |= CTRL_LRST;
6167 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6168 				CSR_WRITE_FLUSH(sc);
6169 				delay(1000);
6170 				sc->sc_ctrl &= ~CTRL_LRST;
6171 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6172 				CSR_WRITE_FLUSH(sc);
6173 				delay(1000);
6174 				CSR_WRITE(sc, WMREG_TXCW,
6175 				    sc->sc_txcw & ~TXCW_ANE);
6176 				CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
6177 			}
6178 		}
6179 	}
6180 
6181 	wm_tbi_set_linkled(sc);
6182 }
6183 
6184 /*
6185  * wm_gmii_reset:
6186  *
6187  *	Reset the PHY.
6188  */
6189 static void
6190 wm_gmii_reset(struct wm_softc *sc)
6191 {
6192 	uint32_t reg;
6193 	int rv;
6194 
6195 	/* get phy semaphore */
6196 	switch (sc->sc_type) {
6197 	case WM_T_82571:
6198 	case WM_T_82572:
6199 	case WM_T_82573:
6200 	case WM_T_82574:
6201 	case WM_T_82583:
6202 		 /* XXX should get sw semaphore, too */
6203 		rv = wm_get_swsm_semaphore(sc);
6204 		break;
6205 	case WM_T_82575:
6206 	case WM_T_82576:
6207 	case WM_T_82580:
6208 	case WM_T_82580ER:
6209 	case WM_T_I350:
6210 	case WM_T_I354:
6211 	case WM_T_I210:
6212 	case WM_T_I211:
6213 	case WM_T_80003:
6214 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6215 		break;
6216 	case WM_T_ICH8:
6217 	case WM_T_ICH9:
6218 	case WM_T_ICH10:
6219 	case WM_T_PCH:
6220 	case WM_T_PCH2:
6221 	case WM_T_PCH_LPT:
6222 		rv = wm_get_swfwhw_semaphore(sc);
6223 		break;
6224 	default:
6225 		/* nothing to do*/
6226 		rv = 0;
6227 		break;
6228 	}
6229 	if (rv != 0) {
6230 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6231 		    __func__);
6232 		return;
6233 	}
6234 
6235 	switch (sc->sc_type) {
6236 	case WM_T_82542_2_0:
6237 	case WM_T_82542_2_1:
6238 		/* null */
6239 		break;
6240 	case WM_T_82543:
6241 		/*
6242 		 * With 82543, we need to force speed and duplex on the MAC
6243 		 * equal to what the PHY speed and duplex configuration is.
6244 		 * In addition, we need to perform a hardware reset on the PHY
6245 		 * to take it out of reset.
6246 		 */
6247 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6248 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6249 
6250 		/* The PHY reset pin is active-low. */
6251 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
6252 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
6253 		    CTRL_EXT_SWDPIN(4));
6254 		reg |= CTRL_EXT_SWDPIO(4);
6255 
6256 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
6257 		CSR_WRITE_FLUSH(sc);
6258 		delay(10*1000);
6259 
6260 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
6261 		CSR_WRITE_FLUSH(sc);
6262 		delay(150);
6263 #if 0
6264 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
6265 #endif
6266 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
6267 		break;
6268 	case WM_T_82544:	/* reset 10000us */
6269 	case WM_T_82540:
6270 	case WM_T_82545:
6271 	case WM_T_82545_3:
6272 	case WM_T_82546:
6273 	case WM_T_82546_3:
6274 	case WM_T_82541:
6275 	case WM_T_82541_2:
6276 	case WM_T_82547:
6277 	case WM_T_82547_2:
6278 	case WM_T_82571:	/* reset 100us */
6279 	case WM_T_82572:
6280 	case WM_T_82573:
6281 	case WM_T_82574:
6282 	case WM_T_82575:
6283 	case WM_T_82576:
6284 	case WM_T_82580:
6285 	case WM_T_82580ER:
6286 	case WM_T_I350:
6287 	case WM_T_I354:
6288 	case WM_T_I210:
6289 	case WM_T_I211:
6290 	case WM_T_82583:
6291 	case WM_T_80003:
6292 		/* generic reset */
6293 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6294 		CSR_WRITE_FLUSH(sc);
6295 		delay(20000);
6296 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6297 		CSR_WRITE_FLUSH(sc);
6298 		delay(20000);
6299 
6300 		if ((sc->sc_type == WM_T_82541)
6301 		    || (sc->sc_type == WM_T_82541_2)
6302 		    || (sc->sc_type == WM_T_82547)
6303 		    || (sc->sc_type == WM_T_82547_2)) {
6304 			/* workaround for igp are done in igp_reset() */
6305 			/* XXX add code to set LED after phy reset */
6306 		}
6307 		break;
6308 	case WM_T_ICH8:
6309 	case WM_T_ICH9:
6310 	case WM_T_ICH10:
6311 	case WM_T_PCH:
6312 	case WM_T_PCH2:
6313 	case WM_T_PCH_LPT:
6314 		/* generic reset */
6315 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
6316 		CSR_WRITE_FLUSH(sc);
6317 		delay(100);
6318 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6319 		CSR_WRITE_FLUSH(sc);
6320 		delay(150);
6321 		break;
6322 	default:
6323 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
6324 		    __func__);
6325 		break;
6326 	}
6327 
6328 	/* release PHY semaphore */
6329 	switch (sc->sc_type) {
6330 	case WM_T_82571:
6331 	case WM_T_82572:
6332 	case WM_T_82573:
6333 	case WM_T_82574:
6334 	case WM_T_82583:
6335 		 /* XXX should put sw semaphore, too */
6336 		wm_put_swsm_semaphore(sc);
6337 		break;
6338 	case WM_T_82575:
6339 	case WM_T_82576:
6340 	case WM_T_82580:
6341 	case WM_T_82580ER:
6342 	case WM_T_I350:
6343 	case WM_T_I354:
6344 	case WM_T_I210:
6345 	case WM_T_I211:
6346 	case WM_T_80003:
6347 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
6348 		break;
6349 	case WM_T_ICH8:
6350 	case WM_T_ICH9:
6351 	case WM_T_ICH10:
6352 	case WM_T_PCH:
6353 	case WM_T_PCH2:
6354 	case WM_T_PCH_LPT:
6355 		wm_put_swfwhw_semaphore(sc);
6356 		break;
6357 	default:
6358 		/* nothing to do*/
6359 		rv = 0;
6360 		break;
6361 	}
6362 
6363 	/* get_cfg_done */
6364 	wm_get_cfg_done(sc);
6365 
6366 	/* extra setup */
6367 	switch (sc->sc_type) {
6368 	case WM_T_82542_2_0:
6369 	case WM_T_82542_2_1:
6370 	case WM_T_82543:
6371 	case WM_T_82544:
6372 	case WM_T_82540:
6373 	case WM_T_82545:
6374 	case WM_T_82545_3:
6375 	case WM_T_82546:
6376 	case WM_T_82546_3:
6377 	case WM_T_82541_2:
6378 	case WM_T_82547_2:
6379 	case WM_T_82571:
6380 	case WM_T_82572:
6381 	case WM_T_82573:
6382 	case WM_T_82574:
6383 	case WM_T_82575:
6384 	case WM_T_82576:
6385 	case WM_T_82580:
6386 	case WM_T_82580ER:
6387 	case WM_T_I350:
6388 	case WM_T_I354:
6389 	case WM_T_I210:
6390 	case WM_T_I211:
6391 	case WM_T_82583:
6392 	case WM_T_80003:
6393 		/* null */
6394 		break;
6395 	case WM_T_82541:
6396 	case WM_T_82547:
6397 		/* XXX Configure actively LED after PHY reset */
6398 		break;
6399 	case WM_T_ICH8:
6400 	case WM_T_ICH9:
6401 	case WM_T_ICH10:
6402 	case WM_T_PCH:
6403 	case WM_T_PCH2:
6404 	case WM_T_PCH_LPT:
6405 		/* Allow time for h/w to get to a quiescent state afer reset */
6406 		delay(10*1000);
6407 
6408 		if (sc->sc_type == WM_T_PCH)
6409 			wm_hv_phy_workaround_ich8lan(sc);
6410 
6411 		if (sc->sc_type == WM_T_PCH2)
6412 			wm_lv_phy_workaround_ich8lan(sc);
6413 
6414 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
6415 			/*
6416 			 * dummy read to clear the phy wakeup bit after lcd
6417 			 * reset
6418 			 */
6419 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
6420 		}
6421 
6422 		/*
6423 		 * XXX Configure the LCD with th extended configuration region
6424 		 * in NVM
6425 		 */
6426 
6427 		/* Configure the LCD with the OEM bits in NVM */
6428 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
6429 		    || (sc->sc_type == WM_T_PCH_LPT)) {
6430 			/*
6431 			 * Disable LPLU.
6432 			 * XXX It seems that 82567 has LPLU, too.
6433 			 */
6434 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
6435 			reg &= ~(HV_OEM_BITS_A1KDIS| HV_OEM_BITS_LPLU);
6436 			reg |= HV_OEM_BITS_ANEGNOW;
6437 			wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
6438 		}
6439 		break;
6440 	default:
6441 		panic("%s: unknown type\n", __func__);
6442 		break;
6443 	}
6444 }
6445 
6446 /*
6447  * wm_get_phy_id_82575:
6448  *
6449  * Return PHY ID. Return -1 if it failed.
6450  */
6451 static int
6452 wm_get_phy_id_82575(struct wm_softc *sc)
6453 {
6454 	uint32_t reg;
6455 	int phyid = -1;
6456 
6457 	/* XXX */
6458 	if ((sc->sc_flags & WM_F_SGMII) == 0)
6459 		return -1;
6460 
6461 	if (wm_sgmii_uses_mdio(sc)) {
6462 		switch (sc->sc_type) {
6463 		case WM_T_82575:
6464 		case WM_T_82576:
6465 			reg = CSR_READ(sc, WMREG_MDIC);
6466 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
6467 			break;
6468 		case WM_T_82580:
6469 		case WM_T_I350:
6470 		case WM_T_I354:
6471 		case WM_T_I210:
6472 		case WM_T_I211:
6473 			reg = CSR_READ(sc, WMREG_MDICNFG);
6474 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
6475 			break;
6476 		default:
6477 			return -1;
6478 		}
6479 	}
6480 
6481 	return phyid;
6482 }
6483 
6484 
6485 /*
6486  * wm_gmii_mediainit:
6487  *
6488  *	Initialize media for use on 1000BASE-T devices.
6489  */
6490 static void
6491 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
6492 {
6493 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6494 	struct mii_data *mii = &sc->sc_mii;
6495 
6496 	/* We have MII. */
6497 	sc->sc_flags |= WM_F_HAS_MII;
6498 
6499 	if (sc->sc_type == WM_T_80003)
6500 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
6501 	else
6502 		sc->sc_tipg = TIPG_1000T_DFLT;
6503 
6504 	/*
6505 	 * Let the chip set speed/duplex on its own based on
6506 	 * signals from the PHY.
6507 	 * XXXbouyer - I'm not sure this is right for the 80003,
6508 	 * the em driver only sets CTRL_SLU here - but it seems to work.
6509 	 */
6510 	sc->sc_ctrl |= CTRL_SLU;
6511 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6512 
6513 	/* Initialize our media structures and probe the GMII. */
6514 	mii->mii_ifp = ifp;
6515 
6516 	/*
6517 	 * Determine the PHY access method.
6518 	 *
6519 	 *  For SGMII, use SGMII specific method.
6520 	 *
6521 	 *  For some devices, we can determine the PHY access method
6522 	 * from sc_type.
6523 	 *
6524 	 *  For ICH8 variants, it's difficult to detemine the PHY access
6525 	 * method by sc_type, so use the PCI product ID for some devices.
6526 	 * For other ICH8 variants, try to use igp's method. If the PHY
6527 	 * can't detect, then use bm's method.
6528 	 */
6529 	switch (prodid) {
6530 	case PCI_PRODUCT_INTEL_PCH_M_LM:
6531 	case PCI_PRODUCT_INTEL_PCH_M_LC:
6532 		/* 82577 */
6533 		sc->sc_phytype = WMPHY_82577;
6534 		mii->mii_readreg = wm_gmii_hv_readreg;
6535 		mii->mii_writereg = wm_gmii_hv_writereg;
6536 		break;
6537 	case PCI_PRODUCT_INTEL_PCH_D_DM:
6538 	case PCI_PRODUCT_INTEL_PCH_D_DC:
6539 		/* 82578 */
6540 		sc->sc_phytype = WMPHY_82578;
6541 		mii->mii_readreg = wm_gmii_hv_readreg;
6542 		mii->mii_writereg = wm_gmii_hv_writereg;
6543 		break;
6544 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
6545 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
6546 		/* 82579 */
6547 		sc->sc_phytype = WMPHY_82579;
6548 		mii->mii_readreg = wm_gmii_hv_readreg;
6549 		mii->mii_writereg = wm_gmii_hv_writereg;
6550 		break;
6551 	case PCI_PRODUCT_INTEL_I217_LM:
6552 	case PCI_PRODUCT_INTEL_I217_V:
6553 	case PCI_PRODUCT_INTEL_I218_LM:
6554 	case PCI_PRODUCT_INTEL_I218_V:
6555 		/* I21[78] */
6556 		mii->mii_readreg = wm_gmii_hv_readreg;
6557 		mii->mii_writereg = wm_gmii_hv_writereg;
6558 		break;
6559 	case PCI_PRODUCT_INTEL_82801I_BM:
6560 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
6561 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
6562 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
6563 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
6564 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
6565 		/* 82567 */
6566 		sc->sc_phytype = WMPHY_BM;
6567 		mii->mii_readreg = wm_gmii_bm_readreg;
6568 		mii->mii_writereg = wm_gmii_bm_writereg;
6569 		break;
6570 	default:
6571 		if (((sc->sc_flags & WM_F_SGMII) != 0)
6572 		    && !wm_sgmii_uses_mdio(sc)){
6573 			mii->mii_readreg = wm_sgmii_readreg;
6574 			mii->mii_writereg = wm_sgmii_writereg;
6575 		} else if (sc->sc_type >= WM_T_80003) {
6576 			mii->mii_readreg = wm_gmii_i80003_readreg;
6577 			mii->mii_writereg = wm_gmii_i80003_writereg;
6578 		} else if (sc->sc_type >= WM_T_I210) {
6579 			mii->mii_readreg = wm_gmii_i82544_readreg;
6580 			mii->mii_writereg = wm_gmii_i82544_writereg;
6581 		} else if (sc->sc_type >= WM_T_82580) {
6582 			sc->sc_phytype = WMPHY_82580;
6583 			mii->mii_readreg = wm_gmii_82580_readreg;
6584 			mii->mii_writereg = wm_gmii_82580_writereg;
6585 		} else if (sc->sc_type >= WM_T_82544) {
6586 			mii->mii_readreg = wm_gmii_i82544_readreg;
6587 			mii->mii_writereg = wm_gmii_i82544_writereg;
6588 		} else {
6589 			mii->mii_readreg = wm_gmii_i82543_readreg;
6590 			mii->mii_writereg = wm_gmii_i82543_writereg;
6591 		}
6592 		break;
6593 	}
6594 	mii->mii_statchg = wm_gmii_statchg;
6595 
6596 	wm_gmii_reset(sc);
6597 
6598 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
6599 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
6600 	    wm_gmii_mediastatus);
6601 
6602 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
6603 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_82580ER)
6604 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
6605 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
6606 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
6607 			/* Attach only one port */
6608 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
6609 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
6610 		} else {
6611 			int i, id;
6612 			uint32_t ctrl_ext;
6613 
6614 			id = wm_get_phy_id_82575(sc);
6615 			if (id != -1) {
6616 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
6617 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
6618 			}
6619 			if ((id == -1)
6620 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
6621 				/* Power on sgmii phy if it is disabled */
6622 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
6623 				CSR_WRITE(sc, WMREG_CTRL_EXT,
6624 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
6625 				CSR_WRITE_FLUSH(sc);
6626 				delay(300*1000); /* XXX too long */
6627 
6628 				/* from 1 to 8 */
6629 				for (i = 1; i < 8; i++)
6630 					mii_attach(sc->sc_dev, &sc->sc_mii,
6631 					    0xffffffff, i, MII_OFFSET_ANY,
6632 					    MIIF_DOPAUSE);
6633 
6634 				/* restore previous sfp cage power state */
6635 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
6636 			}
6637 		}
6638 	} else {
6639 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6640 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6641 	}
6642 
6643 	/*
6644 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
6645 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
6646 	 */
6647 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
6648 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
6649 		wm_set_mdio_slow_mode_hv(sc);
6650 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6651 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6652 	}
6653 
6654 	/*
6655 	 * (For ICH8 variants)
6656 	 * If PHY detection failed, use BM's r/w function and retry.
6657 	 */
6658 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6659 		/* if failed, retry with *_bm_* */
6660 		mii->mii_readreg = wm_gmii_bm_readreg;
6661 		mii->mii_writereg = wm_gmii_bm_writereg;
6662 
6663 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
6664 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
6665 	}
6666 
6667 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
6668 		/* Any PHY wasn't find */
6669 		ifmedia_add(&mii->mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
6670 		ifmedia_set(&mii->mii_media, IFM_ETHER|IFM_NONE);
6671 		sc->sc_phytype = WMPHY_NONE;
6672 	} else {
6673 		/*
6674 		 * PHY Found!
6675 		 * Check PHY type.
6676 		 */
6677 		uint32_t model;
6678 		struct mii_softc *child;
6679 
6680 		child = LIST_FIRST(&mii->mii_phys);
6681 		if (device_is_a(child->mii_dev, "igphy")) {
6682 			struct igphy_softc *isc = (struct igphy_softc *)child;
6683 
6684 			model = isc->sc_mii.mii_mpd_model;
6685 			if (model == MII_MODEL_yyINTEL_I82566)
6686 				sc->sc_phytype = WMPHY_IGP_3;
6687 		}
6688 
6689 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
6690 	}
6691 }
6692 
6693 /*
6694  * wm_gmii_mediastatus:	[ifmedia interface function]
6695  *
6696  *	Get the current interface media status on a 1000BASE-T device.
6697  */
6698 static void
6699 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
6700 {
6701 	struct wm_softc *sc = ifp->if_softc;
6702 
6703 	ether_mediastatus(ifp, ifmr);
6704 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
6705 	    | sc->sc_flowflags;
6706 }
6707 
6708 /*
6709  * wm_gmii_mediachange:	[ifmedia interface function]
6710  *
6711  *	Set hardware to newly-selected media on a 1000BASE-T device.
6712  */
6713 static int
6714 wm_gmii_mediachange(struct ifnet *ifp)
6715 {
6716 	struct wm_softc *sc = ifp->if_softc;
6717 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
6718 	int rc;
6719 
6720 	if ((ifp->if_flags & IFF_UP) == 0)
6721 		return 0;
6722 
6723 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
6724 	sc->sc_ctrl |= CTRL_SLU;
6725 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
6726 	    || (sc->sc_type > WM_T_82543)) {
6727 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
6728 	} else {
6729 		sc->sc_ctrl &= ~CTRL_ASDE;
6730 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
6731 		if (ife->ifm_media & IFM_FDX)
6732 			sc->sc_ctrl |= CTRL_FD;
6733 		switch (IFM_SUBTYPE(ife->ifm_media)) {
6734 		case IFM_10_T:
6735 			sc->sc_ctrl |= CTRL_SPEED_10;
6736 			break;
6737 		case IFM_100_TX:
6738 			sc->sc_ctrl |= CTRL_SPEED_100;
6739 			break;
6740 		case IFM_1000_T:
6741 			sc->sc_ctrl |= CTRL_SPEED_1000;
6742 			break;
6743 		default:
6744 			panic("wm_gmii_mediachange: bad media 0x%x",
6745 			    ife->ifm_media);
6746 		}
6747 	}
6748 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
6749 	if (sc->sc_type <= WM_T_82543)
6750 		wm_gmii_reset(sc);
6751 
6752 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
6753 		return 0;
6754 	return rc;
6755 }
6756 
6757 #define	MDI_IO		CTRL_SWDPIN(2)
6758 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
6759 #define	MDI_CLK		CTRL_SWDPIN(3)
6760 
6761 static void
6762 i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
6763 {
6764 	uint32_t i, v;
6765 
6766 	v = CSR_READ(sc, WMREG_CTRL);
6767 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6768 	v |= MDI_DIR | CTRL_SWDPIO(3);
6769 
6770 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
6771 		if (data & i)
6772 			v |= MDI_IO;
6773 		else
6774 			v &= ~MDI_IO;
6775 		CSR_WRITE(sc, WMREG_CTRL, v);
6776 		CSR_WRITE_FLUSH(sc);
6777 		delay(10);
6778 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6779 		CSR_WRITE_FLUSH(sc);
6780 		delay(10);
6781 		CSR_WRITE(sc, WMREG_CTRL, v);
6782 		CSR_WRITE_FLUSH(sc);
6783 		delay(10);
6784 	}
6785 }
6786 
6787 static uint32_t
6788 i82543_mii_recvbits(struct wm_softc *sc)
6789 {
6790 	uint32_t v, i, data = 0;
6791 
6792 	v = CSR_READ(sc, WMREG_CTRL);
6793 	v &= ~(MDI_IO|MDI_CLK|(CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
6794 	v |= CTRL_SWDPIO(3);
6795 
6796 	CSR_WRITE(sc, WMREG_CTRL, v);
6797 	CSR_WRITE_FLUSH(sc);
6798 	delay(10);
6799 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6800 	CSR_WRITE_FLUSH(sc);
6801 	delay(10);
6802 	CSR_WRITE(sc, WMREG_CTRL, v);
6803 	CSR_WRITE_FLUSH(sc);
6804 	delay(10);
6805 
6806 	for (i = 0; i < 16; i++) {
6807 		data <<= 1;
6808 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6809 		CSR_WRITE_FLUSH(sc);
6810 		delay(10);
6811 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
6812 			data |= 1;
6813 		CSR_WRITE(sc, WMREG_CTRL, v);
6814 		CSR_WRITE_FLUSH(sc);
6815 		delay(10);
6816 	}
6817 
6818 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
6819 	CSR_WRITE_FLUSH(sc);
6820 	delay(10);
6821 	CSR_WRITE(sc, WMREG_CTRL, v);
6822 	CSR_WRITE_FLUSH(sc);
6823 	delay(10);
6824 
6825 	return data;
6826 }
6827 
6828 #undef MDI_IO
6829 #undef MDI_DIR
6830 #undef MDI_CLK
6831 
6832 /*
6833  * wm_gmii_i82543_readreg:	[mii interface function]
6834  *
6835  *	Read a PHY register on the GMII (i82543 version).
6836  */
6837 static int
6838 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
6839 {
6840 	struct wm_softc *sc = device_private(self);
6841 	int rv;
6842 
6843 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6844 	i82543_mii_sendbits(sc, reg | (phy << 5) |
6845 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
6846 	rv = i82543_mii_recvbits(sc) & 0xffff;
6847 
6848 	DPRINTF(WM_DEBUG_GMII,
6849 	    ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
6850 	    device_xname(sc->sc_dev), phy, reg, rv));
6851 
6852 	return rv;
6853 }
6854 
6855 /*
6856  * wm_gmii_i82543_writereg:	[mii interface function]
6857  *
6858  *	Write a PHY register on the GMII (i82543 version).
6859  */
6860 static void
6861 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
6862 {
6863 	struct wm_softc *sc = device_private(self);
6864 
6865 	i82543_mii_sendbits(sc, 0xffffffffU, 32);
6866 	i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
6867 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
6868 	    (MII_COMMAND_START << 30), 32);
6869 }
6870 
6871 /*
6872  * wm_gmii_i82544_readreg:	[mii interface function]
6873  *
6874  *	Read a PHY register on the GMII.
6875  */
6876 static int
6877 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
6878 {
6879 	struct wm_softc *sc = device_private(self);
6880 	uint32_t mdic = 0;
6881 	int i, rv;
6882 
6883 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
6884 	    MDIC_REGADD(reg));
6885 
6886 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6887 		mdic = CSR_READ(sc, WMREG_MDIC);
6888 		if (mdic & MDIC_READY)
6889 			break;
6890 		delay(50);
6891 	}
6892 
6893 	if ((mdic & MDIC_READY) == 0) {
6894 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
6895 		    device_xname(sc->sc_dev), phy, reg);
6896 		rv = 0;
6897 	} else if (mdic & MDIC_E) {
6898 #if 0 /* This is normal if no PHY is present. */
6899 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
6900 		    device_xname(sc->sc_dev), phy, reg);
6901 #endif
6902 		rv = 0;
6903 	} else {
6904 		rv = MDIC_DATA(mdic);
6905 		if (rv == 0xffff)
6906 			rv = 0;
6907 	}
6908 
6909 	return rv;
6910 }
6911 
6912 /*
6913  * wm_gmii_i82544_writereg:	[mii interface function]
6914  *
6915  *	Write a PHY register on the GMII.
6916  */
6917 static void
6918 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
6919 {
6920 	struct wm_softc *sc = device_private(self);
6921 	uint32_t mdic = 0;
6922 	int i;
6923 
6924 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
6925 	    MDIC_REGADD(reg) | MDIC_DATA(val));
6926 
6927 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
6928 		mdic = CSR_READ(sc, WMREG_MDIC);
6929 		if (mdic & MDIC_READY)
6930 			break;
6931 		delay(50);
6932 	}
6933 
6934 	if ((mdic & MDIC_READY) == 0)
6935 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
6936 		    device_xname(sc->sc_dev), phy, reg);
6937 	else if (mdic & MDIC_E)
6938 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
6939 		    device_xname(sc->sc_dev), phy, reg);
6940 }
6941 
6942 /*
6943  * wm_gmii_i80003_readreg:	[mii interface function]
6944  *
6945  *	Read a PHY register on the kumeran
6946  * This could be handled by the PHY layer if we didn't have to lock the
6947  * ressource ...
6948  */
6949 static int
6950 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
6951 {
6952 	struct wm_softc *sc = device_private(self);
6953 	int sem;
6954 	int rv;
6955 
6956 	if (phy != 1) /* only one PHY on kumeran bus */
6957 		return 0;
6958 
6959 	sem = swfwphysem[sc->sc_funcid];
6960 	if (wm_get_swfw_semaphore(sc, sem)) {
6961 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
6962 		    __func__);
6963 		return 0;
6964 	}
6965 
6966 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
6967 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
6968 		    reg >> GG82563_PAGE_SHIFT);
6969 	} else {
6970 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
6971 		    reg >> GG82563_PAGE_SHIFT);
6972 	}
6973 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
6974 	delay(200);
6975 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
6976 	delay(200);
6977 
6978 	wm_put_swfw_semaphore(sc, sem);
6979 	return rv;
6980 }
6981 
6982 /*
6983  * wm_gmii_i80003_writereg:	[mii interface function]
6984  *
6985  *	Write a PHY register on the kumeran.
6986  * This could be handled by the PHY layer if we didn't have to lock the
6987  * ressource ...
6988  */
6989 static void
6990 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
6991 {
6992 	struct wm_softc *sc = device_private(self);
6993 	int sem;
6994 
6995 	if (phy != 1) /* only one PHY on kumeran bus */
6996 		return;
6997 
6998 	sem = swfwphysem[sc->sc_funcid];
6999 	if (wm_get_swfw_semaphore(sc, sem)) {
7000 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7001 		    __func__);
7002 		return;
7003 	}
7004 
7005 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
7006 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
7007 		    reg >> GG82563_PAGE_SHIFT);
7008 	} else {
7009 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
7010 		    reg >> GG82563_PAGE_SHIFT);
7011 	}
7012 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
7013 	delay(200);
7014 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7015 	delay(200);
7016 
7017 	wm_put_swfw_semaphore(sc, sem);
7018 }
7019 
7020 /*
7021  * wm_gmii_bm_readreg:	[mii interface function]
7022  *
7023  *	Read a PHY register on the kumeran
7024  * This could be handled by the PHY layer if we didn't have to lock the
7025  * ressource ...
7026  */
7027 static int
7028 wm_gmii_bm_readreg(device_t self, int phy, int reg)
7029 {
7030 	struct wm_softc *sc = device_private(self);
7031 	int sem;
7032 	int rv;
7033 
7034 	sem = swfwphysem[sc->sc_funcid];
7035 	if (wm_get_swfw_semaphore(sc, sem)) {
7036 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7037 		    __func__);
7038 		return 0;
7039 	}
7040 
7041 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7042 		if (phy == 1)
7043 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7044 			    reg);
7045 		else
7046 			wm_gmii_i82544_writereg(self, phy,
7047 			    GG82563_PHY_PAGE_SELECT,
7048 			    reg >> GG82563_PAGE_SHIFT);
7049 	}
7050 
7051 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
7052 	wm_put_swfw_semaphore(sc, sem);
7053 	return rv;
7054 }
7055 
7056 /*
7057  * wm_gmii_bm_writereg:	[mii interface function]
7058  *
7059  *	Write a PHY register on the kumeran.
7060  * This could be handled by the PHY layer if we didn't have to lock the
7061  * ressource ...
7062  */
7063 static void
7064 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
7065 {
7066 	struct wm_softc *sc = device_private(self);
7067 	int sem;
7068 
7069 	sem = swfwphysem[sc->sc_funcid];
7070 	if (wm_get_swfw_semaphore(sc, sem)) {
7071 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7072 		    __func__);
7073 		return;
7074 	}
7075 
7076 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
7077 		if (phy == 1)
7078 			wm_gmii_i82544_writereg(self, phy, MII_IGPHY_PAGE_SELECT,
7079 			    reg);
7080 		else
7081 			wm_gmii_i82544_writereg(self, phy,
7082 			    GG82563_PHY_PAGE_SELECT,
7083 			    reg >> GG82563_PAGE_SHIFT);
7084 	}
7085 
7086 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
7087 	wm_put_swfw_semaphore(sc, sem);
7088 }
7089 
7090 static void
7091 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
7092 {
7093 	struct wm_softc *sc = device_private(self);
7094 	uint16_t regnum = BM_PHY_REG_NUM(offset);
7095 	uint16_t wuce;
7096 
7097 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
7098 	if (sc->sc_type == WM_T_PCH) {
7099 		/* XXX e1000 driver do nothing... why? */
7100 	}
7101 
7102 	/* Set page 769 */
7103 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7104 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7105 
7106 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
7107 
7108 	wuce &= ~BM_WUC_HOST_WU_BIT;
7109 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
7110 	    wuce | BM_WUC_ENABLE_BIT);
7111 
7112 	/* Select page 800 */
7113 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7114 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
7115 
7116 	/* Write page 800 */
7117 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
7118 
7119 	if (rd)
7120 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
7121 	else
7122 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
7123 
7124 	/* Set page 769 */
7125 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7126 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
7127 
7128 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
7129 }
7130 
7131 /*
7132  * wm_gmii_hv_readreg:	[mii interface function]
7133  *
7134  *	Read a PHY register on the kumeran
7135  * This could be handled by the PHY layer if we didn't have to lock the
7136  * ressource ...
7137  */
7138 static int
7139 wm_gmii_hv_readreg(device_t self, int phy, int reg)
7140 {
7141 	struct wm_softc *sc = device_private(self);
7142 	uint16_t page = BM_PHY_REG_PAGE(reg);
7143 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7144 	uint16_t val;
7145 	int rv;
7146 
7147 	if (wm_get_swfwhw_semaphore(sc)) {
7148 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7149 		    __func__);
7150 		return 0;
7151 	}
7152 
7153 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7154 	if (sc->sc_phytype == WMPHY_82577) {
7155 		/* XXX must write */
7156 	}
7157 
7158 	/* Page 800 works differently than the rest so it has its own func */
7159 	if (page == BM_WUC_PAGE) {
7160 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
7161 		return val;
7162 	}
7163 
7164 	/*
7165 	 * Lower than page 768 works differently than the rest so it has its
7166 	 * own func
7167 	 */
7168 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7169 		printf("gmii_hv_readreg!!!\n");
7170 		return 0;
7171 	}
7172 
7173 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7174 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7175 		    page << BME1000_PAGE_SHIFT);
7176 	}
7177 
7178 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
7179 	wm_put_swfwhw_semaphore(sc);
7180 	return rv;
7181 }
7182 
7183 /*
7184  * wm_gmii_hv_writereg:	[mii interface function]
7185  *
7186  *	Write a PHY register on the kumeran.
7187  * This could be handled by the PHY layer if we didn't have to lock the
7188  * ressource ...
7189  */
7190 static void
7191 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
7192 {
7193 	struct wm_softc *sc = device_private(self);
7194 	uint16_t page = BM_PHY_REG_PAGE(reg);
7195 	uint16_t regnum = BM_PHY_REG_NUM(reg);
7196 
7197 	if (wm_get_swfwhw_semaphore(sc)) {
7198 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7199 		    __func__);
7200 		return;
7201 	}
7202 
7203 	/* XXX Workaround failure in MDIO access while cable is disconnected */
7204 
7205 	/* Page 800 works differently than the rest so it has its own func */
7206 	if (page == BM_WUC_PAGE) {
7207 		uint16_t tmp;
7208 
7209 		tmp = val;
7210 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
7211 		return;
7212 	}
7213 
7214 	/*
7215 	 * Lower than page 768 works differently than the rest so it has its
7216 	 * own func
7217 	 */
7218 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
7219 		printf("gmii_hv_writereg!!!\n");
7220 		return;
7221 	}
7222 
7223 	/*
7224 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
7225 	 * Power Down (whenever bit 11 of the PHY control register is set)
7226 	 */
7227 
7228 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
7229 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
7230 		    page << BME1000_PAGE_SHIFT);
7231 	}
7232 
7233 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
7234 	wm_put_swfwhw_semaphore(sc);
7235 }
7236 
7237 /*
7238  * wm_sgmii_uses_mdio
7239  *
7240  * Check whether the transaction is to the internal PHY or the external
7241  * MDIO interface. Return true if it's MDIO.
7242  */
7243 static bool
7244 wm_sgmii_uses_mdio(struct wm_softc *sc)
7245 {
7246 	uint32_t reg;
7247 	bool ismdio = false;
7248 
7249 	switch (sc->sc_type) {
7250 	case WM_T_82575:
7251 	case WM_T_82576:
7252 		reg = CSR_READ(sc, WMREG_MDIC);
7253 		ismdio = ((reg & MDIC_DEST) != 0);
7254 		break;
7255 	case WM_T_82580:
7256 	case WM_T_82580ER:
7257 	case WM_T_I350:
7258 	case WM_T_I354:
7259 	case WM_T_I210:
7260 	case WM_T_I211:
7261 		reg = CSR_READ(sc, WMREG_MDICNFG);
7262 		ismdio = ((reg & MDICNFG_DEST) != 0);
7263 		break;
7264 	default:
7265 		break;
7266 	}
7267 
7268 	return ismdio;
7269 }
7270 
7271 /*
7272  * wm_sgmii_readreg:	[mii interface function]
7273  *
7274  *	Read a PHY register on the SGMII
7275  * This could be handled by the PHY layer if we didn't have to lock the
7276  * ressource ...
7277  */
7278 static int
7279 wm_sgmii_readreg(device_t self, int phy, int reg)
7280 {
7281 	struct wm_softc *sc = device_private(self);
7282 	uint32_t i2ccmd;
7283 	int i, rv;
7284 
7285 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7286 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7287 		    __func__);
7288 		return 0;
7289 	}
7290 
7291 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7292 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7293 	    | I2CCMD_OPCODE_READ;
7294 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7295 
7296 	/* Poll the ready bit */
7297 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7298 		delay(50);
7299 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7300 		if (i2ccmd & I2CCMD_READY)
7301 			break;
7302 	}
7303 	if ((i2ccmd & I2CCMD_READY) == 0)
7304 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
7305 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7306 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7307 
7308 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
7309 
7310 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7311 	return rv;
7312 }
7313 
7314 /*
7315  * wm_sgmii_writereg:	[mii interface function]
7316  *
7317  *	Write a PHY register on the SGMII.
7318  * This could be handled by the PHY layer if we didn't have to lock the
7319  * ressource ...
7320  */
7321 static void
7322 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
7323 {
7324 	struct wm_softc *sc = device_private(self);
7325 	uint32_t i2ccmd;
7326 	int i;
7327 
7328 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
7329 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7330 		    __func__);
7331 		return;
7332 	}
7333 
7334 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
7335 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
7336 	    | I2CCMD_OPCODE_WRITE;
7337 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
7338 
7339 	/* Poll the ready bit */
7340 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
7341 		delay(50);
7342 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
7343 		if (i2ccmd & I2CCMD_READY)
7344 			break;
7345 	}
7346 	if ((i2ccmd & I2CCMD_READY) == 0)
7347 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
7348 	if ((i2ccmd & I2CCMD_ERROR) != 0)
7349 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
7350 
7351 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
7352 }
7353 
7354 /*
7355  * wm_gmii_82580_readreg:	[mii interface function]
7356  *
7357  *	Read a PHY register on the 82580 and I350.
7358  * This could be handled by the PHY layer if we didn't have to lock the
7359  * ressource ...
7360  */
7361 static int
7362 wm_gmii_82580_readreg(device_t self, int phy, int reg)
7363 {
7364 	struct wm_softc *sc = device_private(self);
7365 	int sem;
7366 	int rv;
7367 
7368 	sem = swfwphysem[sc->sc_funcid];
7369 	if (wm_get_swfw_semaphore(sc, sem)) {
7370 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7371 		    __func__);
7372 		return 0;
7373 	}
7374 
7375 	rv = wm_gmii_i82544_readreg(self, phy, reg);
7376 
7377 	wm_put_swfw_semaphore(sc, sem);
7378 	return rv;
7379 }
7380 
7381 /*
7382  * wm_gmii_82580_writereg:	[mii interface function]
7383  *
7384  *	Write a PHY register on the 82580 and I350.
7385  * This could be handled by the PHY layer if we didn't have to lock the
7386  * ressource ...
7387  */
7388 static void
7389 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
7390 {
7391 	struct wm_softc *sc = device_private(self);
7392 	int sem;
7393 
7394 	sem = swfwphysem[sc->sc_funcid];
7395 	if (wm_get_swfw_semaphore(sc, sem)) {
7396 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7397 		    __func__);
7398 		return;
7399 	}
7400 
7401 	wm_gmii_i82544_writereg(self, phy, reg, val);
7402 
7403 	wm_put_swfw_semaphore(sc, sem);
7404 }
7405 
7406 /*
7407  * wm_gmii_statchg:	[mii interface function]
7408  *
7409  *	Callback from MII layer when media changes.
7410  */
7411 static void
7412 wm_gmii_statchg(struct ifnet *ifp)
7413 {
7414 	struct wm_softc *sc = ifp->if_softc;
7415 	struct mii_data *mii = &sc->sc_mii;
7416 
7417 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
7418 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7419 	sc->sc_fcrtl &= ~FCRTL_XONE;
7420 
7421 	/*
7422 	 * Get flow control negotiation result.
7423 	 */
7424 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
7425 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
7426 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
7427 		mii->mii_media_active &= ~IFM_ETH_FMASK;
7428 	}
7429 
7430 	if (sc->sc_flowflags & IFM_FLOW) {
7431 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
7432 			sc->sc_ctrl |= CTRL_TFCE;
7433 			sc->sc_fcrtl |= FCRTL_XONE;
7434 		}
7435 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
7436 			sc->sc_ctrl |= CTRL_RFCE;
7437 	}
7438 
7439 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
7440 		DPRINTF(WM_DEBUG_LINK,
7441 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
7442 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7443 	} else {
7444 		DPRINTF(WM_DEBUG_LINK,
7445 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
7446 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7447 	}
7448 
7449 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7450 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7451 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
7452 						 : WMREG_FCRTL, sc->sc_fcrtl);
7453 	if (sc->sc_type == WM_T_80003) {
7454 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
7455 		case IFM_1000_T:
7456 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7457 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
7458 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
7459 			break;
7460 		default:
7461 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
7462 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
7463 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
7464 			break;
7465 		}
7466 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
7467 	}
7468 }
7469 
7470 /*
7471  * wm_kmrn_readreg:
7472  *
7473  *	Read a kumeran register
7474  */
7475 static int
7476 wm_kmrn_readreg(struct wm_softc *sc, int reg)
7477 {
7478 	int rv;
7479 
7480 	if (sc->sc_flags == WM_F_SWFW_SYNC) {
7481 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7482 			aprint_error_dev(sc->sc_dev,
7483 			    "%s: failed to get semaphore\n", __func__);
7484 			return 0;
7485 		}
7486 	} else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
7487 		if (wm_get_swfwhw_semaphore(sc)) {
7488 			aprint_error_dev(sc->sc_dev,
7489 			    "%s: failed to get semaphore\n", __func__);
7490 			return 0;
7491 		}
7492 	}
7493 
7494 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7495 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7496 	    KUMCTRLSTA_REN);
7497 	CSR_WRITE_FLUSH(sc);
7498 	delay(2);
7499 
7500 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
7501 
7502 	if (sc->sc_flags == WM_F_SWFW_SYNC)
7503 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7504 	else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
7505 		wm_put_swfwhw_semaphore(sc);
7506 
7507 	return rv;
7508 }
7509 
7510 /*
7511  * wm_kmrn_writereg:
7512  *
7513  *	Write a kumeran register
7514  */
7515 static void
7516 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
7517 {
7518 
7519 	if (sc->sc_flags == WM_F_SWFW_SYNC) {
7520 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
7521 			aprint_error_dev(sc->sc_dev,
7522 			    "%s: failed to get semaphore\n", __func__);
7523 			return;
7524 		}
7525 	} else if (sc->sc_flags == WM_F_SWFWHW_SYNC) {
7526 		if (wm_get_swfwhw_semaphore(sc)) {
7527 			aprint_error_dev(sc->sc_dev,
7528 			    "%s: failed to get semaphore\n", __func__);
7529 			return;
7530 		}
7531 	}
7532 
7533 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
7534 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
7535 	    (val & KUMCTRLSTA_MASK));
7536 
7537 	if (sc->sc_flags == WM_F_SWFW_SYNC)
7538 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
7539 	else if (sc->sc_flags == WM_F_SWFWHW_SYNC)
7540 		wm_put_swfwhw_semaphore(sc);
7541 }
7542 
7543 static int
7544 wm_is_onboard_nvm_eeprom(struct wm_softc *sc)
7545 {
7546 	uint32_t eecd = 0;
7547 
7548 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
7549 	    || sc->sc_type == WM_T_82583) {
7550 		eecd = CSR_READ(sc, WMREG_EECD);
7551 
7552 		/* Isolate bits 15 & 16 */
7553 		eecd = ((eecd >> 15) & 0x03);
7554 
7555 		/* If both bits are set, device is Flash type */
7556 		if (eecd == 0x03)
7557 			return 0;
7558 	}
7559 	return 1;
7560 }
7561 
7562 static int
7563 wm_get_swsm_semaphore(struct wm_softc *sc)
7564 {
7565 	int32_t timeout;
7566 	uint32_t swsm;
7567 
7568 	/* Get the FW semaphore. */
7569 	timeout = 1000 + 1; /* XXX */
7570 	while (timeout) {
7571 		swsm = CSR_READ(sc, WMREG_SWSM);
7572 		swsm |= SWSM_SWESMBI;
7573 		CSR_WRITE(sc, WMREG_SWSM, swsm);
7574 		/* if we managed to set the bit we got the semaphore. */
7575 		swsm = CSR_READ(sc, WMREG_SWSM);
7576 		if (swsm & SWSM_SWESMBI)
7577 			break;
7578 
7579 		delay(50);
7580 		timeout--;
7581 	}
7582 
7583 	if (timeout == 0) {
7584 		aprint_error_dev(sc->sc_dev, "could not acquire EEPROM GNT\n");
7585 		/* Release semaphores */
7586 		wm_put_swsm_semaphore(sc);
7587 		return 1;
7588 	}
7589 	return 0;
7590 }
7591 
7592 static void
7593 wm_put_swsm_semaphore(struct wm_softc *sc)
7594 {
7595 	uint32_t swsm;
7596 
7597 	swsm = CSR_READ(sc, WMREG_SWSM);
7598 	swsm &= ~(SWSM_SWESMBI);
7599 	CSR_WRITE(sc, WMREG_SWSM, swsm);
7600 }
7601 
7602 static int
7603 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7604 {
7605 	uint32_t swfw_sync;
7606 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
7607 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
7608 	int timeout = 200;
7609 
7610 	for (timeout = 0; timeout < 200; timeout++) {
7611 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7612 			if (wm_get_swsm_semaphore(sc)) {
7613 				aprint_error_dev(sc->sc_dev,
7614 				    "%s: failed to get semaphore\n",
7615 				    __func__);
7616 				return 1;
7617 			}
7618 		}
7619 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7620 		if ((swfw_sync & (swmask | fwmask)) == 0) {
7621 			swfw_sync |= swmask;
7622 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7623 			if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7624 				wm_put_swsm_semaphore(sc);
7625 			return 0;
7626 		}
7627 		if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7628 			wm_put_swsm_semaphore(sc);
7629 		delay(5000);
7630 	}
7631 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
7632 	    device_xname(sc->sc_dev), mask, swfw_sync);
7633 	return 1;
7634 }
7635 
7636 static void
7637 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
7638 {
7639 	uint32_t swfw_sync;
7640 
7641 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE) {
7642 		while (wm_get_swsm_semaphore(sc) != 0)
7643 			continue;
7644 	}
7645 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
7646 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
7647 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
7648 	if (sc->sc_flags & WM_F_EEPROM_SEMAPHORE)
7649 		wm_put_swsm_semaphore(sc);
7650 }
7651 
7652 static int
7653 wm_get_swfwhw_semaphore(struct wm_softc *sc)
7654 {
7655 	uint32_t ext_ctrl;
7656 	int timeout = 200;
7657 
7658 	for (timeout = 0; timeout < 200; timeout++) {
7659 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7660 		ext_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
7661 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7662 
7663 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7664 		if (ext_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
7665 			return 0;
7666 		delay(5000);
7667 	}
7668 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
7669 	    device_xname(sc->sc_dev), ext_ctrl);
7670 	return 1;
7671 }
7672 
7673 static void
7674 wm_put_swfwhw_semaphore(struct wm_softc *sc)
7675 {
7676 	uint32_t ext_ctrl;
7677 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
7678 	ext_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
7679 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
7680 }
7681 
7682 static int
7683 wm_get_hw_semaphore_82573(struct wm_softc *sc)
7684 {
7685 	int i = 0;
7686 	uint32_t reg;
7687 
7688 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7689 	do {
7690 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
7691 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
7692 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7693 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
7694 			break;
7695 		delay(2*1000);
7696 		i++;
7697 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
7698 
7699 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
7700 		wm_put_hw_semaphore_82573(sc);
7701 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
7702 		    device_xname(sc->sc_dev));
7703 		return -1;
7704 	}
7705 
7706 	return 0;
7707 }
7708 
7709 static void
7710 wm_put_hw_semaphore_82573(struct wm_softc *sc)
7711 {
7712 	uint32_t reg;
7713 
7714 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
7715 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
7716 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
7717 }
7718 
7719 static int
7720 wm_valid_nvm_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
7721 {
7722 	uint32_t eecd;
7723 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
7724 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
7725 	uint8_t sig_byte = 0;
7726 
7727 	switch (sc->sc_type) {
7728 	case WM_T_ICH8:
7729 	case WM_T_ICH9:
7730 		eecd = CSR_READ(sc, WMREG_EECD);
7731 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
7732 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
7733 			return 0;
7734 		}
7735 		/* FALLTHROUGH */
7736 	default:
7737 		/* Default to 0 */
7738 		*bank = 0;
7739 
7740 		/* Check bank 0 */
7741 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
7742 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7743 			*bank = 0;
7744 			return 0;
7745 		}
7746 
7747 		/* Check bank 1 */
7748 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
7749 		    &sig_byte);
7750 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
7751 			*bank = 1;
7752 			return 0;
7753 		}
7754 	}
7755 
7756 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
7757 		device_xname(sc->sc_dev)));
7758 	return -1;
7759 }
7760 
7761 /******************************************************************************
7762  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
7763  * register.
7764  *
7765  * sc - Struct containing variables accessed by shared code
7766  * offset - offset of word in the EEPROM to read
7767  * data - word read from the EEPROM
7768  * words - number of words to read
7769  *****************************************************************************/
7770 static int
7771 wm_read_eeprom_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
7772 {
7773 	int32_t  error = 0;
7774 	uint32_t flash_bank = 0;
7775 	uint32_t act_offset = 0;
7776 	uint32_t bank_offset = 0;
7777 	uint16_t word = 0;
7778 	uint16_t i = 0;
7779 
7780 	/* We need to know which is the valid flash bank.  In the event
7781 	 * that we didn't allocate eeprom_shadow_ram, we may not be
7782 	 * managing flash_bank.  So it cannot be trusted and needs
7783 	 * to be updated with each read.
7784 	 */
7785 	error = wm_valid_nvm_bank_detect_ich8lan(sc, &flash_bank);
7786 	if (error) {
7787 		aprint_error_dev(sc->sc_dev, "%s: failed to detect NVM bank\n",
7788 		    __func__);
7789 		flash_bank = 0;
7790 	}
7791 
7792 	/*
7793 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
7794 	 * size
7795 	 */
7796 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
7797 
7798 	error = wm_get_swfwhw_semaphore(sc);
7799 	if (error) {
7800 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7801 		    __func__);
7802 		return error;
7803 	}
7804 
7805 	for (i = 0; i < words; i++) {
7806 		/* The NVM part needs a byte offset, hence * 2 */
7807 		act_offset = bank_offset + ((offset + i) * 2);
7808 		error = wm_read_ich8_word(sc, act_offset, &word);
7809 		if (error) {
7810 			aprint_error_dev(sc->sc_dev,
7811 			    "%s: failed to read NVM\n", __func__);
7812 			break;
7813 		}
7814 		data[i] = word;
7815 	}
7816 
7817 	wm_put_swfwhw_semaphore(sc);
7818 	return error;
7819 }
7820 
7821 /******************************************************************************
7822  * This function does initial flash setup so that a new read/write/erase cycle
7823  * can be started.
7824  *
7825  * sc - The pointer to the hw structure
7826  ****************************************************************************/
7827 static int32_t
7828 wm_ich8_cycle_init(struct wm_softc *sc)
7829 {
7830 	uint16_t hsfsts;
7831 	int32_t error = 1;
7832 	int32_t i     = 0;
7833 
7834 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7835 
7836 	/* May be check the Flash Des Valid bit in Hw status */
7837 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
7838 		return error;
7839 	}
7840 
7841 	/* Clear FCERR in Hw status by writing 1 */
7842 	/* Clear DAEL in Hw status by writing a 1 */
7843 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
7844 
7845 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7846 
7847 	/*
7848 	 * Either we should have a hardware SPI cycle in progress bit to check
7849 	 * against, in order to start a new cycle or FDONE bit should be
7850 	 * changed in the hardware so that it is 1 after harware reset, which
7851 	 * can then be used as an indication whether a cycle is in progress or
7852 	 * has been completed .. we should also have some software semaphore
7853 	 * mechanism to guard FDONE or the cycle in progress bit so that two
7854 	 * threads access to those bits can be sequentiallized or a way so that
7855 	 * 2 threads dont start the cycle at the same time
7856 	 */
7857 
7858 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7859 		/*
7860 		 * There is no cycle running at present, so we can start a
7861 		 * cycle
7862 		 */
7863 
7864 		/* Begin by setting Flash Cycle Done. */
7865 		hsfsts |= HSFSTS_DONE;
7866 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7867 		error = 0;
7868 	} else {
7869 		/*
7870 		 * otherwise poll for sometime so the current cycle has a
7871 		 * chance to end before giving up.
7872 		 */
7873 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
7874 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7875 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
7876 				error = 0;
7877 				break;
7878 			}
7879 			delay(1);
7880 		}
7881 		if (error == 0) {
7882 			/*
7883 			 * Successful in waiting for previous cycle to timeout,
7884 			 * now set the Flash Cycle Done.
7885 			 */
7886 			hsfsts |= HSFSTS_DONE;
7887 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
7888 		}
7889 	}
7890 	return error;
7891 }
7892 
7893 /******************************************************************************
7894  * This function starts a flash cycle and waits for its completion
7895  *
7896  * sc - The pointer to the hw structure
7897  ****************************************************************************/
7898 static int32_t
7899 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
7900 {
7901 	uint16_t hsflctl;
7902 	uint16_t hsfsts;
7903 	int32_t error = 1;
7904 	uint32_t i = 0;
7905 
7906 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
7907 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7908 	hsflctl |= HSFCTL_GO;
7909 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7910 
7911 	/* wait till FDONE bit is set to 1 */
7912 	do {
7913 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7914 		if (hsfsts & HSFSTS_DONE)
7915 			break;
7916 		delay(1);
7917 		i++;
7918 	} while (i < timeout);
7919 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
7920 		error = 0;
7921 
7922 	return error;
7923 }
7924 
7925 /******************************************************************************
7926  * Reads a byte or word from the NVM using the ICH8 flash access registers.
7927  *
7928  * sc - The pointer to the hw structure
7929  * index - The index of the byte or word to read.
7930  * size - Size of data to read, 1=byte 2=word
7931  * data - Pointer to the word to store the value read.
7932  *****************************************************************************/
7933 static int32_t
7934 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
7935     uint32_t size, uint16_t* data)
7936 {
7937 	uint16_t hsfsts;
7938 	uint16_t hsflctl;
7939 	uint32_t flash_linear_address;
7940 	uint32_t flash_data = 0;
7941 	int32_t error = 1;
7942 	int32_t count = 0;
7943 
7944 	if (size < 1  || size > 2 || data == 0x0 ||
7945 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
7946 		return error;
7947 
7948 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
7949 	    sc->sc_ich8_flash_base;
7950 
7951 	do {
7952 		delay(1);
7953 		/* Steps */
7954 		error = wm_ich8_cycle_init(sc);
7955 		if (error)
7956 			break;
7957 
7958 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
7959 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
7960 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
7961 		    & HSFCTL_BCOUNT_MASK;
7962 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
7963 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
7964 
7965 		/*
7966 		 * Write the last 24 bits of index into Flash Linear address
7967 		 * field in Flash Address
7968 		 */
7969 		/* TODO: TBD maybe check the index against the size of flash */
7970 
7971 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
7972 
7973 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
7974 
7975 		/*
7976 		 * Check if FCERR is set to 1, if set to 1, clear it and try
7977 		 * the whole sequence a few more times, else read in (shift in)
7978 		 * the Flash Data0, the order is least significant byte first
7979 		 * msb to lsb
7980 		 */
7981 		if (error == 0) {
7982 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
7983 			if (size == 1)
7984 				*data = (uint8_t)(flash_data & 0x000000FF);
7985 			else if (size == 2)
7986 				*data = (uint16_t)(flash_data & 0x0000FFFF);
7987 			break;
7988 		} else {
7989 			/*
7990 			 * If we've gotten here, then things are probably
7991 			 * completely hosed, but if the error condition is
7992 			 * detected, it won't hurt to give it another try...
7993 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
7994 			 */
7995 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
7996 			if (hsfsts & HSFSTS_ERR) {
7997 				/* Repeat for some time before giving up. */
7998 				continue;
7999 			} else if ((hsfsts & HSFSTS_DONE) == 0)
8000 				break;
8001 		}
8002 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
8003 
8004 	return error;
8005 }
8006 
8007 /******************************************************************************
8008  * Reads a single byte from the NVM using the ICH8 flash access registers.
8009  *
8010  * sc - pointer to wm_hw structure
8011  * index - The index of the byte to read.
8012  * data - Pointer to a byte to store the value read.
8013  *****************************************************************************/
8014 static int32_t
8015 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
8016 {
8017 	int32_t status;
8018 	uint16_t word = 0;
8019 
8020 	status = wm_read_ich8_data(sc, index, 1, &word);
8021 	if (status == 0)
8022 		*data = (uint8_t)word;
8023 	else
8024 		*data = 0;
8025 
8026 	return status;
8027 }
8028 
8029 /******************************************************************************
8030  * Reads a word from the NVM using the ICH8 flash access registers.
8031  *
8032  * sc - pointer to wm_hw structure
8033  * index - The starting byte index of the word to read.
8034  * data - Pointer to a word to store the value read.
8035  *****************************************************************************/
8036 static int32_t
8037 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
8038 {
8039 	int32_t status;
8040 
8041 	status = wm_read_ich8_data(sc, index, 2, data);
8042 	return status;
8043 }
8044 
8045 static int
8046 wm_check_mng_mode(struct wm_softc *sc)
8047 {
8048 	int rv;
8049 
8050 	switch (sc->sc_type) {
8051 	case WM_T_ICH8:
8052 	case WM_T_ICH9:
8053 	case WM_T_ICH10:
8054 	case WM_T_PCH:
8055 	case WM_T_PCH2:
8056 	case WM_T_PCH_LPT:
8057 		rv = wm_check_mng_mode_ich8lan(sc);
8058 		break;
8059 	case WM_T_82574:
8060 	case WM_T_82583:
8061 		rv = wm_check_mng_mode_82574(sc);
8062 		break;
8063 	case WM_T_82571:
8064 	case WM_T_82572:
8065 	case WM_T_82573:
8066 	case WM_T_80003:
8067 		rv = wm_check_mng_mode_generic(sc);
8068 		break;
8069 	default:
8070 		/* noting to do */
8071 		rv = 0;
8072 		break;
8073 	}
8074 
8075 	return rv;
8076 }
8077 
8078 static int
8079 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
8080 {
8081 	uint32_t fwsm;
8082 
8083 	fwsm = CSR_READ(sc, WMREG_FWSM);
8084 
8085 	if ((fwsm & FWSM_MODE_MASK) == (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT))
8086 		return 1;
8087 
8088 	return 0;
8089 }
8090 
8091 static int
8092 wm_check_mng_mode_82574(struct wm_softc *sc)
8093 {
8094 	uint16_t data;
8095 
8096 	wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8097 
8098 	if ((data & EEPROM_CFG2_MNGM_MASK) != 0)
8099 		return 1;
8100 
8101 	return 0;
8102 }
8103 
8104 static int
8105 wm_check_mng_mode_generic(struct wm_softc *sc)
8106 {
8107 	uint32_t fwsm;
8108 
8109 	fwsm = CSR_READ(sc, WMREG_FWSM);
8110 
8111 	if ((fwsm & FWSM_MODE_MASK) == (MNG_IAMT_MODE << FWSM_MODE_SHIFT))
8112 		return 1;
8113 
8114 	return 0;
8115 }
8116 
8117 static int
8118 wm_enable_mng_pass_thru(struct wm_softc *sc)
8119 {
8120 	uint32_t manc, fwsm, factps;
8121 
8122 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
8123 		return 0;
8124 
8125 	manc = CSR_READ(sc, WMREG_MANC);
8126 
8127 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
8128 		device_xname(sc->sc_dev), manc));
8129 	if ((manc & MANC_RECV_TCO_EN) == 0)
8130 		return 0;
8131 
8132 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
8133 		fwsm = CSR_READ(sc, WMREG_FWSM);
8134 		factps = CSR_READ(sc, WMREG_FACTPS);
8135 		if (((factps & FACTPS_MNGCG) == 0)
8136 		    && ((fwsm & FWSM_MODE_MASK)
8137 			== (MNG_ICH_IAMT_MODE << FWSM_MODE_SHIFT)))
8138 			return 1;
8139 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
8140 		uint16_t data;
8141 
8142 		factps = CSR_READ(sc, WMREG_FACTPS);
8143 		wm_read_eeprom(sc, EEPROM_OFF_CFG2, 1, &data);
8144 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
8145 			device_xname(sc->sc_dev), factps, data));
8146 		if (((factps & FACTPS_MNGCG) == 0)
8147 		    && ((data & EEPROM_CFG2_MNGM_MASK)
8148 			== (EEPROM_CFG2_MNGM_PT << EEPROM_CFG2_MNGM_SHIFT)))
8149 			return 1;
8150 	} else if (((manc & MANC_SMBUS_EN) != 0)
8151 	    && ((manc & MANC_ASF_EN) == 0))
8152 		return 1;
8153 
8154 	return 0;
8155 }
8156 
8157 static int
8158 wm_check_reset_block(struct wm_softc *sc)
8159 {
8160 	uint32_t reg;
8161 
8162 	switch (sc->sc_type) {
8163 	case WM_T_ICH8:
8164 	case WM_T_ICH9:
8165 	case WM_T_ICH10:
8166 	case WM_T_PCH:
8167 	case WM_T_PCH2:
8168 	case WM_T_PCH_LPT:
8169 		reg = CSR_READ(sc, WMREG_FWSM);
8170 		if ((reg & FWSM_RSPCIPHY) != 0)
8171 			return 0;
8172 		else
8173 			return -1;
8174 		break;
8175 	case WM_T_82571:
8176 	case WM_T_82572:
8177 	case WM_T_82573:
8178 	case WM_T_82574:
8179 	case WM_T_82583:
8180 	case WM_T_80003:
8181 		reg = CSR_READ(sc, WMREG_MANC);
8182 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
8183 			return -1;
8184 		else
8185 			return 0;
8186 		break;
8187 	default:
8188 		/* no problem */
8189 		break;
8190 	}
8191 
8192 	return 0;
8193 }
8194 
8195 static void
8196 wm_get_hw_control(struct wm_softc *sc)
8197 {
8198 	uint32_t reg;
8199 
8200 	switch (sc->sc_type) {
8201 	case WM_T_82573:
8202 		reg = CSR_READ(sc, WMREG_SWSM);
8203 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
8204 		break;
8205 	case WM_T_82571:
8206 	case WM_T_82572:
8207 	case WM_T_82574:
8208 	case WM_T_82583:
8209 	case WM_T_80003:
8210 	case WM_T_ICH8:
8211 	case WM_T_ICH9:
8212 	case WM_T_ICH10:
8213 	case WM_T_PCH:
8214 	case WM_T_PCH2:
8215 	case WM_T_PCH_LPT:
8216 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8217 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
8218 		break;
8219 	default:
8220 		break;
8221 	}
8222 }
8223 
8224 static void
8225 wm_release_hw_control(struct wm_softc *sc)
8226 {
8227 	uint32_t reg;
8228 
8229 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
8230 		return;
8231 
8232 	if (sc->sc_type == WM_T_82573) {
8233 		reg = CSR_READ(sc, WMREG_SWSM);
8234 		reg &= ~SWSM_DRV_LOAD;
8235 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
8236 	} else {
8237 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8238 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
8239 	}
8240 }
8241 
8242 /* XXX Currently TBI only */
8243 static int
8244 wm_check_for_link(struct wm_softc *sc)
8245 {
8246 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8247 	uint32_t rxcw;
8248 	uint32_t ctrl;
8249 	uint32_t status;
8250 	uint32_t sig;
8251 
8252 	rxcw = CSR_READ(sc, WMREG_RXCW);
8253 	ctrl = CSR_READ(sc, WMREG_CTRL);
8254 	status = CSR_READ(sc, WMREG_STATUS);
8255 
8256 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
8257 
8258 	DPRINTF(WM_DEBUG_LINK, ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
8259 		device_xname(sc->sc_dev), __func__,
8260 		((ctrl & CTRL_SWDPIN(1)) == sig),
8261 		((status & STATUS_LU) != 0),
8262 		((rxcw & RXCW_C) != 0)
8263 		    ));
8264 
8265 	/*
8266 	 * SWDPIN   LU RXCW
8267 	 *      0    0    0
8268 	 *      0    0    1	(should not happen)
8269 	 *      0    1    0	(should not happen)
8270 	 *      0    1    1	(should not happen)
8271 	 *      1    0    0	Disable autonego and force linkup
8272 	 *      1    0    1	got /C/ but not linkup yet
8273 	 *      1    1    0	(linkup)
8274 	 *      1    1    1	If IFM_AUTO, back to autonego
8275 	 *
8276 	 */
8277 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
8278 	    && ((status & STATUS_LU) == 0)
8279 	    && ((rxcw & RXCW_C) == 0)) {
8280 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
8281 			__func__));
8282 		sc->sc_tbi_linkup = 0;
8283 		/* Disable auto-negotiation in the TXCW register */
8284 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
8285 
8286 		/*
8287 		 * Force link-up and also force full-duplex.
8288 		 *
8289 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
8290 		 * so we should update sc->sc_ctrl
8291 		 */
8292 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
8293 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8294 	} else if (((status & STATUS_LU) != 0)
8295 	    && ((rxcw & RXCW_C) != 0)
8296 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
8297 		sc->sc_tbi_linkup = 1;
8298 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
8299 			__func__));
8300 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
8301 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
8302 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
8303 	    && ((rxcw & RXCW_C) != 0)) {
8304 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
8305 	} else {
8306 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
8307 			status));
8308 	}
8309 
8310 	return 0;
8311 }
8312 
8313 /* Work-around for 82566 Kumeran PCS lock loss */
8314 static void
8315 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
8316 {
8317 	int miistatus, active, i;
8318 	int reg;
8319 
8320 	miistatus = sc->sc_mii.mii_media_status;
8321 
8322 	/* If the link is not up, do nothing */
8323 	if ((miistatus & IFM_ACTIVE) != 0)
8324 		return;
8325 
8326 	active = sc->sc_mii.mii_media_active;
8327 
8328 	/* Nothing to do if the link is other than 1Gbps */
8329 	if (IFM_SUBTYPE(active) != IFM_1000_T)
8330 		return;
8331 
8332 	for (i = 0; i < 10; i++) {
8333 		/* read twice */
8334 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8335 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
8336 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) != 0)
8337 			goto out;	/* GOOD! */
8338 
8339 		/* Reset the PHY */
8340 		wm_gmii_reset(sc);
8341 		delay(5*1000);
8342 	}
8343 
8344 	/* Disable GigE link negotiation */
8345 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
8346 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8347 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8348 
8349 	/*
8350 	 * Call gig speed drop workaround on Gig disable before accessing
8351 	 * any PHY registers.
8352 	 */
8353 	wm_gig_downshift_workaround_ich8lan(sc);
8354 
8355 out:
8356 	return;
8357 }
8358 
8359 /* WOL from S5 stops working */
8360 static void
8361 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
8362 {
8363 	uint16_t kmrn_reg;
8364 
8365 	/* Only for igp3 */
8366 	if (sc->sc_phytype == WMPHY_IGP_3) {
8367 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
8368 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
8369 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8370 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
8371 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
8372 	}
8373 }
8374 
8375 #ifdef WM_WOL
8376 /* Power down workaround on D3 */
8377 static void
8378 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
8379 {
8380 	uint32_t reg;
8381 	int i;
8382 
8383 	for (i = 0; i < 2; i++) {
8384 		/* Disable link */
8385 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8386 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
8387 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8388 
8389 		/*
8390 		 * Call gig speed drop workaround on Gig disable before
8391 		 * accessing any PHY registers
8392 		 */
8393 		if (sc->sc_type == WM_T_ICH8)
8394 			wm_gig_downshift_workaround_ich8lan(sc);
8395 
8396 		/* Write VR power-down enable */
8397 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8398 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8399 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
8400 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
8401 
8402 		/* Read it back and test */
8403 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
8404 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
8405 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
8406 			break;
8407 
8408 		/* Issue PHY reset and repeat at most one more time */
8409 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8410 	}
8411 }
8412 #endif /* WM_WOL */
8413 
8414 /*
8415  * Workaround for pch's PHYs
8416  * XXX should be moved to new PHY driver?
8417  */
8418 static void
8419 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
8420 {
8421 	if (sc->sc_phytype == WMPHY_82577)
8422 		wm_set_mdio_slow_mode_hv(sc);
8423 
8424 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
8425 
8426 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
8427 
8428 	/* 82578 */
8429 	if (sc->sc_phytype == WMPHY_82578) {
8430 		/* PCH rev. < 3 */
8431 		if (sc->sc_rev < 3) {
8432 			/* XXX 6 bit shift? Why? Is it page2? */
8433 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
8434 			    0x66c0);
8435 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
8436 			    0xffff);
8437 		}
8438 
8439 		/* XXX phy rev. < 2 */
8440 	}
8441 
8442 	/* Select page 0 */
8443 
8444 	/* XXX acquire semaphore */
8445 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
8446 	/* XXX release semaphore */
8447 
8448 	/*
8449 	 * Configure the K1 Si workaround during phy reset assuming there is
8450 	 * link so that it disables K1 if link is in 1Gbps.
8451 	 */
8452 	wm_k1_gig_workaround_hv(sc, 1);
8453 }
8454 
8455 static void
8456 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
8457 {
8458 
8459 	wm_set_mdio_slow_mode_hv(sc);
8460 }
8461 
8462 static void
8463 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
8464 {
8465 	int k1_enable = sc->sc_nvm_k1_enabled;
8466 
8467 	/* XXX acquire semaphore */
8468 
8469 	if (link) {
8470 		k1_enable = 0;
8471 
8472 		/* Link stall fix for link up */
8473 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
8474 	} else {
8475 		/* Link stall fix for link down */
8476 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
8477 	}
8478 
8479 	wm_configure_k1_ich8lan(sc, k1_enable);
8480 
8481 	/* XXX release semaphore */
8482 }
8483 
8484 static void
8485 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
8486 {
8487 	uint32_t reg;
8488 
8489 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
8490 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
8491 	    reg | HV_KMRN_MDIO_SLOW);
8492 }
8493 
8494 static void
8495 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
8496 {
8497 	uint32_t ctrl, ctrl_ext, tmp;
8498 	uint16_t kmrn_reg;
8499 
8500 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
8501 
8502 	if (k1_enable)
8503 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
8504 	else
8505 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
8506 
8507 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
8508 
8509 	delay(20);
8510 
8511 	ctrl = CSR_READ(sc, WMREG_CTRL);
8512 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8513 
8514 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
8515 	tmp |= CTRL_FRCSPD;
8516 
8517 	CSR_WRITE(sc, WMREG_CTRL, tmp);
8518 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
8519 	CSR_WRITE_FLUSH(sc);
8520 	delay(20);
8521 
8522 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
8523 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8524 	CSR_WRITE_FLUSH(sc);
8525 	delay(20);
8526 }
8527 
8528 static void
8529 wm_smbustopci(struct wm_softc *sc)
8530 {
8531 	uint32_t fwsm;
8532 
8533 	fwsm = CSR_READ(sc, WMREG_FWSM);
8534 	if (((fwsm & FWSM_FW_VALID) == 0)
8535 	    && ((wm_check_reset_block(sc) == 0))) {
8536 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
8537 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
8538 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8539 		CSR_WRITE_FLUSH(sc);
8540 		delay(10);
8541 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
8542 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8543 		CSR_WRITE_FLUSH(sc);
8544 		delay(50*1000);
8545 
8546 		/*
8547 		 * Gate automatic PHY configuration by hardware on non-managed
8548 		 * 82579
8549 		 */
8550 		if (sc->sc_type == WM_T_PCH2)
8551 			wm_gate_hw_phy_config_ich8lan(sc, 1);
8552 	}
8553 }
8554 
8555 static void
8556 wm_set_pcie_completion_timeout(struct wm_softc *sc)
8557 {
8558 	uint32_t gcr;
8559 	pcireg_t ctrl2;
8560 
8561 	gcr = CSR_READ(sc, WMREG_GCR);
8562 
8563 	/* Only take action if timeout value is defaulted to 0 */
8564 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
8565 		goto out;
8566 
8567 	if ((gcr & GCR_CAP_VER2) == 0) {
8568 		gcr |= GCR_CMPL_TMOUT_10MS;
8569 		goto out;
8570 	}
8571 
8572 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
8573 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
8574 	ctrl2 |= WM_PCIE_DCSR2_16MS;
8575 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
8576 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
8577 
8578 out:
8579 	/* Disable completion timeout resend */
8580 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
8581 
8582 	CSR_WRITE(sc, WMREG_GCR, gcr);
8583 }
8584 
8585 /* special case - for 82575 - need to do manual init ... */
8586 static void
8587 wm_reset_init_script_82575(struct wm_softc *sc)
8588 {
8589 	/*
8590 	 * remark: this is untested code - we have no board without EEPROM
8591 	 *  same setup as mentioned int the freeBSD driver for the i82575
8592 	 */
8593 
8594 	/* SerDes configuration via SERDESCTRL */
8595 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
8596 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
8597 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
8598 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
8599 
8600 	/* CCM configuration via CCMCTL register */
8601 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
8602 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
8603 
8604 	/* PCIe lanes configuration */
8605 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
8606 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
8607 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
8608 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
8609 
8610 	/* PCIe PLL Configuration */
8611 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
8612 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
8613 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
8614 }
8615 
8616 static void
8617 wm_init_manageability(struct wm_softc *sc)
8618 {
8619 
8620 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8621 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
8622 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8623 
8624 		/* disabl hardware interception of ARP */
8625 		manc &= ~MANC_ARP_EN;
8626 
8627 		/* enable receiving management packets to the host */
8628 		if (sc->sc_type >= WM_T_82571) {
8629 			manc |= MANC_EN_MNG2HOST;
8630 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
8631 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
8632 
8633 		}
8634 
8635 		CSR_WRITE(sc, WMREG_MANC, manc);
8636 	}
8637 }
8638 
8639 static void
8640 wm_release_manageability(struct wm_softc *sc)
8641 {
8642 
8643 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
8644 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
8645 
8646 		manc |= MANC_ARP_EN;
8647 		if (sc->sc_type >= WM_T_82571)
8648 			manc &= ~MANC_EN_MNG2HOST;
8649 
8650 		CSR_WRITE(sc, WMREG_MANC, manc);
8651 	}
8652 }
8653 
8654 static void
8655 wm_get_wakeup(struct wm_softc *sc)
8656 {
8657 
8658 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
8659 	switch (sc->sc_type) {
8660 	case WM_T_82573:
8661 	case WM_T_82583:
8662 		sc->sc_flags |= WM_F_HAS_AMT;
8663 		/* FALLTHROUGH */
8664 	case WM_T_80003:
8665 	case WM_T_82541:
8666 	case WM_T_82547:
8667 	case WM_T_82571:
8668 	case WM_T_82572:
8669 	case WM_T_82574:
8670 	case WM_T_82575:
8671 	case WM_T_82576:
8672 	case WM_T_82580:
8673 	case WM_T_82580ER:
8674 	case WM_T_I350:
8675 	case WM_T_I354:
8676 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE_MASK) != 0)
8677 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
8678 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8679 		break;
8680 	case WM_T_ICH8:
8681 	case WM_T_ICH9:
8682 	case WM_T_ICH10:
8683 	case WM_T_PCH:
8684 	case WM_T_PCH2:
8685 	case WM_T_PCH_LPT:
8686 		sc->sc_flags |= WM_F_HAS_AMT;
8687 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
8688 		break;
8689 	default:
8690 		break;
8691 	}
8692 
8693 	/* 1: HAS_MANAGE */
8694 	if (wm_enable_mng_pass_thru(sc) != 0)
8695 		sc->sc_flags |= WM_F_HAS_MANAGE;
8696 
8697 #ifdef WM_DEBUG
8698 	printf("\n");
8699 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
8700 		printf("HAS_AMT,");
8701 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
8702 		printf("ARC_SUBSYS_VALID,");
8703 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
8704 		printf("ASF_FIRMWARE_PRES,");
8705 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
8706 		printf("HAS_MANAGE,");
8707 	printf("\n");
8708 #endif
8709 	/*
8710 	 * Note that the WOL flags is set after the resetting of the eeprom
8711 	 * stuff
8712 	 */
8713 }
8714 
8715 #ifdef WM_WOL
8716 /* WOL in the newer chipset interfaces (pchlan) */
8717 static void
8718 wm_enable_phy_wakeup(struct wm_softc *sc)
8719 {
8720 #if 0
8721 	uint16_t preg;
8722 
8723 	/* Copy MAC RARs to PHY RARs */
8724 
8725 	/* Copy MAC MTA to PHY MTA */
8726 
8727 	/* Configure PHY Rx Control register */
8728 
8729 	/* Enable PHY wakeup in MAC register */
8730 
8731 	/* Configure and enable PHY wakeup in PHY registers */
8732 
8733 	/* Activate PHY wakeup */
8734 
8735 	/* XXX */
8736 #endif
8737 }
8738 
8739 static void
8740 wm_enable_wakeup(struct wm_softc *sc)
8741 {
8742 	uint32_t reg, pmreg;
8743 	pcireg_t pmode;
8744 
8745 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
8746 		&pmreg, NULL) == 0)
8747 		return;
8748 
8749 	/* Advertise the wakeup capability */
8750 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
8751 	    | CTRL_SWDPIN(3));
8752 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
8753 
8754 	/* ICH workaround */
8755 	switch (sc->sc_type) {
8756 	case WM_T_ICH8:
8757 	case WM_T_ICH9:
8758 	case WM_T_ICH10:
8759 	case WM_T_PCH:
8760 	case WM_T_PCH2:
8761 	case WM_T_PCH_LPT:
8762 		/* Disable gig during WOL */
8763 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
8764 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
8765 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
8766 		if (sc->sc_type == WM_T_PCH)
8767 			wm_gmii_reset(sc);
8768 
8769 		/* Power down workaround */
8770 		if (sc->sc_phytype == WMPHY_82577) {
8771 			struct mii_softc *child;
8772 
8773 			/* Assume that the PHY is copper */
8774 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
8775 			if (child->mii_mpd_rev <= 2)
8776 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
8777 				    (768 << 5) | 25, 0x0444); /* magic num */
8778 		}
8779 		break;
8780 	default:
8781 		break;
8782 	}
8783 
8784 	/* Keep the laser running on fiber adapters */
8785 	if (((sc->sc_wmp->wmp_flags & WMP_F_1000X) != 0)
8786 	    || (sc->sc_wmp->wmp_flags & WMP_F_SERDES) != 0) {
8787 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8788 		reg |= CTRL_EXT_SWDPIN(3);
8789 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8790 	}
8791 
8792 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
8793 #if 0	/* for the multicast packet */
8794 	reg |= WUFC_MC;
8795 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
8796 #endif
8797 
8798 	if (sc->sc_type == WM_T_PCH) {
8799 		wm_enable_phy_wakeup(sc);
8800 	} else {
8801 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
8802 		CSR_WRITE(sc, WMREG_WUFC, reg);
8803 	}
8804 
8805 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
8806 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
8807 		|| (sc->sc_type == WM_T_PCH2))
8808 		    && (sc->sc_phytype == WMPHY_IGP_3))
8809 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
8810 
8811 	/* Request PME */
8812 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
8813 #if 0
8814 	/* Disable WOL */
8815 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
8816 #else
8817 	/* For WOL */
8818 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
8819 #endif
8820 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
8821 }
8822 #endif /* WM_WOL */
8823 
8824 static bool
8825 wm_suspend(device_t self, const pmf_qual_t *qual)
8826 {
8827 	struct wm_softc *sc = device_private(self);
8828 
8829 	wm_release_manageability(sc);
8830 	wm_release_hw_control(sc);
8831 #ifdef WM_WOL
8832 	wm_enable_wakeup(sc);
8833 #endif
8834 
8835 	return true;
8836 }
8837 
8838 static bool
8839 wm_resume(device_t self, const pmf_qual_t *qual)
8840 {
8841 	struct wm_softc *sc = device_private(self);
8842 
8843 	wm_init_manageability(sc);
8844 
8845 	return true;
8846 }
8847 
8848 static void
8849 wm_set_eee_i350(struct wm_softc * sc)
8850 {
8851 	uint32_t ipcnfg, eeer;
8852 
8853 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
8854 	eeer = CSR_READ(sc, WMREG_EEER);
8855 
8856 	if ((sc->sc_flags & WM_F_EEE) != 0) {
8857 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8858 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
8859 		    | EEER_LPI_FC);
8860 	} else {
8861 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
8862 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
8863 		    | EEER_LPI_FC);
8864 	}
8865 
8866 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
8867 	CSR_WRITE(sc, WMREG_EEER, eeer);
8868 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
8869 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
8870 }
8871