1 /*	$NetBSD: if_wm.c,v 1.389 2016/01/29 11:30:03 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
77  *	- TX multi-queue
78  *	- EEE (Energy-Efficient Ethernet)
79  *	- Virtual Function
80  *	- Set LED correctly (based on contents in EEPROM)
81  *	- Rework how parameters are loaded from the EEPROM.
82  *	- Image Unique ID
83  */
84 
85 #include <sys/cdefs.h>
86 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.389 2016/01/29 11:30:03 msaitoh Exp $");
87 
88 #ifdef _KERNEL_OPT
89 #include "opt_net_mpsafe.h"
90 #endif
91 
92 #include <sys/param.h>
93 #include <sys/systm.h>
94 #include <sys/callout.h>
95 #include <sys/mbuf.h>
96 #include <sys/malloc.h>
97 #include <sys/kmem.h>
98 #include <sys/kernel.h>
99 #include <sys/socket.h>
100 #include <sys/ioctl.h>
101 #include <sys/errno.h>
102 #include <sys/device.h>
103 #include <sys/queue.h>
104 #include <sys/syslog.h>
105 #include <sys/interrupt.h>
106 
107 #include <sys/rndsource.h>
108 
109 #include <net/if.h>
110 #include <net/if_dl.h>
111 #include <net/if_media.h>
112 #include <net/if_ether.h>
113 
114 #include <net/bpf.h>
115 
116 #include <netinet/in.h>			/* XXX for struct ip */
117 #include <netinet/in_systm.h>		/* XXX for struct ip */
118 #include <netinet/ip.h>			/* XXX for struct ip */
119 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
120 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
121 
122 #include <sys/bus.h>
123 #include <sys/intr.h>
124 #include <machine/endian.h>
125 
126 #include <dev/mii/mii.h>
127 #include <dev/mii/miivar.h>
128 #include <dev/mii/miidevs.h>
129 #include <dev/mii/mii_bitbang.h>
130 #include <dev/mii/ikphyreg.h>
131 #include <dev/mii/igphyreg.h>
132 #include <dev/mii/igphyvar.h>
133 #include <dev/mii/inbmphyreg.h>
134 
135 #include <dev/pci/pcireg.h>
136 #include <dev/pci/pcivar.h>
137 #include <dev/pci/pcidevs.h>
138 
139 #include <dev/pci/if_wmreg.h>
140 #include <dev/pci/if_wmvar.h>
141 
142 #ifdef WM_DEBUG
143 #define	WM_DEBUG_LINK		0x01
144 #define	WM_DEBUG_TX		0x02
145 #define	WM_DEBUG_RX		0x04
146 #define	WM_DEBUG_GMII		0x08
147 #define	WM_DEBUG_MANAGE		0x10
148 #define	WM_DEBUG_NVM		0x20
149 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
150     | WM_DEBUG_MANAGE | WM_DEBUG_NVM;
151 
152 #define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
153 #else
154 #define	DPRINTF(x, y)	/* nothing */
155 #endif /* WM_DEBUG */
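
/*
 * Editor's note: DPRINTF() takes the printf arguments as one
 * parenthesized list so that "printf y" expands correctly.  A
 * hypothetical call site looks like:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link is up\n",
 *	    device_xname(sc->sc_dev)));
 */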
156 
157 #ifdef NET_MPSAFE
158 #define WM_MPSAFE	1
159 #endif
160 
161 /*
162  * The maximum number of interrupts this driver can use (TX + RX + link).
163  */
164 #define WM_MAX_NTXINTR		16
165 #define WM_MAX_NRXINTR		16
166 #define WM_MAX_NINTR		(WM_MAX_NTXINTR + WM_MAX_NRXINTR + 1)
167 
168 /*
169  * Transmit descriptor list size.  Due to errata, we can only have
170  * 256 hardware descriptors in the ring on < 82544, but we use 4096
171  * on >= 82544.  We tell the upper layers that they can queue a lot
172  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
173  * of them at a time.
174  *
175  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
176  * chains containing many small mbufs have been observed in zero-copy
177  * situations with jumbo frames.
178  */
179 #define	WM_NTXSEGS		256
180 #define	WM_IFQUEUELEN		256
181 #define	WM_TXQUEUELEN_MAX	64
182 #define	WM_TXQUEUELEN_MAX_82547	16
183 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
184 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
185 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
186 #define	WM_NTXDESC_82542	256
187 #define	WM_NTXDESC_82544	4096
188 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
189 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
190 #define	WM_TXDESCSIZE(txq)	(WM_NTXDESC(txq) * sizeof(wiseman_txdesc_t))
191 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
192 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
193 
194 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
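
/*
 * Editor's sketch (hypothetical helper, not part of the driver):
 * because the ring and job counts above are powers of two, the
 * WM_NEXTTX()/WM_NEXTTXS()/WM_NEXTRX() macros can wrap an index with
 * a mask instead of a modulo; e.g. with 4096 descriptors, index 4095
 * wraps back to 0.
 */
static inline int
wm_ring_next_example(int x, int ndesc)
{

	/* ndesc must be a power of two for the mask to equal x % ndesc. */
	return (x + 1) & (ndesc - 1);
}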
195 
196 /*
197  * Receive descriptor list size.  We have one Rx buffer for normal-
198  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
199  * packet.  We allocate 256 receive descriptors, each with a 2k
200  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
201  */
202 #define	WM_NRXDESC		256
203 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
204 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
205 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
206 
207 typedef union txdescs {
208 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
209 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
210 } txdescs_t;
211 
212 #define	WM_CDTXOFF(x)	(sizeof(wiseman_txdesc_t) * (x))
213 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))
214 
215 /*
216  * Software state for transmit jobs.
217  */
218 struct wm_txsoft {
219 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
220 	bus_dmamap_t txs_dmamap;	/* our DMA map */
221 	int txs_firstdesc;		/* first descriptor in packet */
222 	int txs_lastdesc;		/* last descriptor in packet */
223 	int txs_ndesc;			/* # of descriptors used */
224 };
225 
226 /*
227  * Software state for receive buffers.  Each descriptor gets a
228  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
229  * more than one buffer, we chain them together.
230  */
231 struct wm_rxsoft {
232 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
233 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
234 };
235 
236 #define WM_LINKUP_TIMEOUT	50
237 
238 static uint16_t swfwphysem[] = {
239 	SWFW_PHY0_SM,
240 	SWFW_PHY1_SM,
241 	SWFW_PHY2_SM,
242 	SWFW_PHY3_SM
243 };
244 
245 static const uint32_t wm_82580_rxpbs_table[] = {
246 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
247 };
248 
249 struct wm_softc;
250 
251 struct wm_txqueue {
252 	kmutex_t *txq_lock;		/* lock for tx operations */
253 
254 	struct wm_softc *txq_sc;
255 
256 	int txq_id;			/* index of this transmit queue */
257 	int txq_intr_idx;		/* index into the MSI-X table */
258 
259 	/* Software state for the transmit descriptors. */
260 	int txq_num;			/* must be a power of two */
261 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
262 
263 	/* TX control data structures. */
264 	int txq_ndesc;			/* must be a power of two */
265 	txdescs_t *txq_descs_u;
266 	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
267 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
268 	int txq_desc_rseg;		/* real number of control segments */
269 	size_t txq_desc_size;		/* control data size */
270 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
271 #define	txq_descs	txq_descs_u->sctxu_txdescs
272 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
273 
274 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
275 
276 	int txq_free;			/* number of free Tx descriptors */
277 	int txq_next;			/* next ready Tx descriptor */
278 
279 	int txq_sfree;			/* number of free Tx jobs */
280 	int txq_snext;			/* next free Tx job */
281 	int txq_sdirty;			/* dirty Tx jobs */
282 
283 	/* These 4 variables are used only on the 82547. */
284 	int txq_fifo_size;		/* Tx FIFO size */
285 	int txq_fifo_head;		/* current head of FIFO */
286 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
287 	int txq_fifo_stall;		/* Tx FIFO is stalled */
288 
289 	/* XXX which event counter is required? */
290 };
291 
292 struct wm_rxqueue {
293 	kmutex_t *rxq_lock;		/* lock for rx operations */
294 
295 	struct wm_softc *rxq_sc;
296 
297 	int rxq_id;			/* index of this receive queue */
298 	int rxq_intr_idx;		/* index into the MSI-X table */
299 
300 	/* Software state for the receive descriptors. */
301 	wiseman_rxdesc_t *rxq_descs;
302 
303 	/* RX control data structures. */
304 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
305 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
306 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
307 	int rxq_desc_rseg;		/* real number of control segments */
308 	size_t rxq_desc_size;		/* control data size */
309 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
310 
311 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
312 
313 	int rxq_ptr;			/* next ready Rx desc/queue ent */
314 	int rxq_discard;
315 	int rxq_len;
316 	struct mbuf *rxq_head;
317 	struct mbuf *rxq_tail;
318 	struct mbuf **rxq_tailp;
319 
320 	/* XXX which event counter is required? */
321 };
322 
323 /*
324  * Software state per device.
325  */
326 struct wm_softc {
327 	device_t sc_dev;		/* generic device information */
328 	bus_space_tag_t sc_st;		/* bus space tag */
329 	bus_space_handle_t sc_sh;	/* bus space handle */
330 	bus_size_t sc_ss;		/* bus space size */
331 	bus_space_tag_t sc_iot;		/* I/O space tag */
332 	bus_space_handle_t sc_ioh;	/* I/O space handle */
333 	bus_size_t sc_ios;		/* I/O space size */
334 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
335 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
336 	bus_size_t sc_flashs;		/* flash registers space size */
337 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
338 
339 	struct ethercom sc_ethercom;	/* ethernet common data */
340 	struct mii_data sc_mii;		/* MII/media information */
341 
342 	pci_chipset_tag_t sc_pc;
343 	pcitag_t sc_pcitag;
344 	int sc_bus_speed;		/* PCI/PCIX bus speed */
345 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
346 
347 	uint16_t sc_pcidevid;		/* PCI device ID */
348 	wm_chip_type sc_type;		/* MAC type */
349 	int sc_rev;			/* MAC revision */
350 	wm_phy_type sc_phytype;		/* PHY type */
351 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
352 #define	WM_MEDIATYPE_UNKNOWN		0x00
353 #define	WM_MEDIATYPE_FIBER		0x01
354 #define	WM_MEDIATYPE_COPPER		0x02
355 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
356 	int sc_funcid;			/* unit number of the chip (0 to 3) */
357 	int sc_flags;			/* flags; see below */
358 	int sc_if_flags;		/* last if_flags */
359 	int sc_flowflags;		/* 802.3x flow control flags */
360 	int sc_align_tweak;
361 
362 	void *sc_ihs[WM_MAX_NINTR];	/*
363 					 * Interrupt cookies.
364 					 * Legacy and MSI use sc_ihs[0].
365 					 */
366 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
367 	int sc_nintrs;			/* number of interrupts */
368 
369 	int sc_link_intr_idx;		/* index of MSI-X tables */
370 
371 	callout_t sc_tick_ch;		/* tick callout */
372 	bool sc_stopping;
373 
374 	int sc_nvm_ver_major;
375 	int sc_nvm_ver_minor;
376 	int sc_nvm_ver_build;
377 	int sc_nvm_addrbits;		/* NVM address bits */
378 	unsigned int sc_nvm_wordsize;	/* NVM word size */
379 	int sc_ich8_flash_base;
380 	int sc_ich8_flash_bank_size;
381 	int sc_nvm_k1_enabled;
382 
383 	int sc_ntxqueues;
384 	struct wm_txqueue *sc_txq;
385 
386 	int sc_nrxqueues;
387 	struct wm_rxqueue *sc_rxq;
388 
389 #ifdef WM_EVENT_COUNTERS
390 	/* Event counters. */
391 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
392 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
393 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
394 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
395 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
396 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
397 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
398 
399 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
400 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
401 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
402 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
403 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
404 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
405 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
406 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
407 
408 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
409 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped(too many segs) */
410 
411 	struct evcnt sc_ev_tu;		/* Tx underrun */
412 
413 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
414 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
415 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
416 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
417 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
418 #endif /* WM_EVENT_COUNTERS */
419 
420 	/* This variable is used only on the 82547. */
421 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
422 
423 	uint32_t sc_ctrl;		/* prototype CTRL register */
424 #if 0
425 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
426 #endif
427 	uint32_t sc_icr;		/* prototype interrupt bits */
428 	uint32_t sc_itr;		/* prototype intr throttling reg */
429 	uint32_t sc_tctl;		/* prototype TCTL register */
430 	uint32_t sc_rctl;		/* prototype RCTL register */
431 	uint32_t sc_txcw;		/* prototype TXCW register */
432 	uint32_t sc_tipg;		/* prototype TIPG register */
433 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
434 	uint32_t sc_pba;		/* prototype PBA register */
435 
436 	int sc_tbi_linkup;		/* TBI link status */
437 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
438 	int sc_tbi_serdes_ticks;	/* tbi ticks */
439 
440 	int sc_mchash_type;		/* multicast filter offset */
441 
442 	krndsource_t rnd_source;	/* random source */
443 
444 	kmutex_t *sc_core_lock;		/* lock for softc operations */
445 };
446 
447 #define WM_TX_LOCK(_txq)	do { if ((_txq)->txq_lock) mutex_enter((_txq)->txq_lock); } while (0)
448 #define WM_TX_UNLOCK(_txq)	do { if ((_txq)->txq_lock) mutex_exit((_txq)->txq_lock); } while (0)
449 #define WM_TX_LOCKED(_txq)	(!(_txq)->txq_lock || mutex_owned((_txq)->txq_lock))
450 #define WM_RX_LOCK(_rxq)	do { if ((_rxq)->rxq_lock) mutex_enter((_rxq)->rxq_lock); } while (0)
451 #define WM_RX_UNLOCK(_rxq)	do { if ((_rxq)->rxq_lock) mutex_exit((_rxq)->rxq_lock); } while (0)
452 #define WM_RX_LOCKED(_rxq)	(!(_rxq)->rxq_lock || mutex_owned((_rxq)->rxq_lock))
453 #define WM_CORE_LOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock); } while (0)
454 #define WM_CORE_UNLOCK(_sc)	do { if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock); } while (0)
455 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
456 
457 #ifdef WM_MPSAFE
458 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
459 #else
460 #define CALLOUT_FLAGS	0
461 #endif
462 
463 #define	WM_RXCHAIN_RESET(rxq)						\
464 do {									\
465 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
466 	*(rxq)->rxq_tailp = NULL;					\
467 	(rxq)->rxq_len = 0;						\
468 } while (/*CONSTCOND*/0)
469 
470 #define	WM_RXCHAIN_LINK(rxq, m)						\
471 do {									\
472 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
473 	(rxq)->rxq_tailp = &(m)->m_next;				\
474 } while (/*CONSTCOND*/0)
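
/*
 * Editor's sketch (hypothetical helper, not part of the driver) of the
 * tail-pointer idiom behind WM_RXCHAIN_LINK(): rxq_tailp always points
 * at the slot (rxq_head, or some mbuf's m_next field) where the next
 * fragment must be stored, so appending is O(1) and never walks the
 * chain.
 */
static inline void
wm_rxchain_link_example(struct wm_rxqueue *rxq, struct mbuf *m)
{

	*rxq->rxq_tailp = rxq->rxq_tail = m;	/* store into the tail slot */
	rxq->rxq_tailp = &m->m_next;		/* m's m_next is the new slot */
}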
475 
476 #ifdef WM_EVENT_COUNTERS
477 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
478 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
479 #else
480 #define	WM_EVCNT_INCR(ev)	/* nothing */
481 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
482 #endif
483 
484 #define	CSR_READ(sc, reg)						\
485 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
486 #define	CSR_WRITE(sc, reg, val)						\
487 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
488 #define	CSR_WRITE_FLUSH(sc)						\
489 	(void) CSR_READ((sc), WMREG_STATUS)
490 
491 #define ICH8_FLASH_READ32(sc, reg) \
492 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh, (reg))
493 #define ICH8_FLASH_WRITE32(sc, reg, data) \
494 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
495 
496 #define ICH8_FLASH_READ16(sc, reg) \
497 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh, (reg))
498 #define ICH8_FLASH_WRITE16(sc, reg, data) \
499 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh, (reg), (data))
500 
501 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((x)))
502 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
503 
504 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
505 #define	WM_CDTXADDR_HI(txq, x)						\
506 	(sizeof(bus_addr_t) == 8 ?					\
507 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
508 
509 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
510 #define	WM_CDRXADDR_HI(rxq, x)						\
511 	(sizeof(bus_addr_t) == 8 ?					\
512 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
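
/*
 * Editor's note: the _LO/_HI macros split a descriptor DMA address into
 * the two 32-bit register halves.  For example, a hypothetical 64-bit
 * address 0x0000000123456789 yields _LO 0x23456789 and _HI 0x00000001;
 * with a 32-bit bus_addr_t the high half is always 0.
 */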
513 
514 /*
515  * Register read/write functions.
516  * Other than CSR_{READ|WRITE}().
517  */
518 #if 0
519 static inline uint32_t wm_io_read(struct wm_softc *, int);
520 #endif
521 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
522 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
523 	uint32_t, uint32_t);
524 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
525 
526 /*
527  * Descriptor sync/init functions.
528  */
529 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
530 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
531 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
532 
533 /*
534  * Device driver interface functions and commonly used functions.
535  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
536  */
537 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
538 static int	wm_match(device_t, cfdata_t, void *);
539 static void	wm_attach(device_t, device_t, void *);
540 static int	wm_detach(device_t, int);
541 static bool	wm_suspend(device_t, const pmf_qual_t *);
542 static bool	wm_resume(device_t, const pmf_qual_t *);
543 static void	wm_watchdog(struct ifnet *);
544 static void	wm_tick(void *);
545 static int	wm_ifflags_cb(struct ethercom *);
546 static int	wm_ioctl(struct ifnet *, u_long, void *);
547 /* MAC address related */
548 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
549 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
550 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
551 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
552 static void	wm_set_filter(struct wm_softc *);
553 /* Reset and init related */
554 static void	wm_set_vlan(struct wm_softc *);
555 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
556 static void	wm_get_auto_rd_done(struct wm_softc *);
557 static void	wm_lan_init_done(struct wm_softc *);
558 static void	wm_get_cfg_done(struct wm_softc *);
559 static void	wm_initialize_hardware_bits(struct wm_softc *);
560 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
561 static void	wm_reset(struct wm_softc *);
562 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
563 static void	wm_rxdrain(struct wm_rxqueue *);
564 static void	wm_rss_getkey(uint8_t *);
565 static void	wm_init_rss(struct wm_softc *);
566 static void	wm_adjust_qnum(struct wm_softc *, int);
567 static int	wm_setup_legacy(struct wm_softc *);
568 static int	wm_setup_msix(struct wm_softc *);
569 static int	wm_init(struct ifnet *);
570 static int	wm_init_locked(struct ifnet *);
571 static void	wm_stop(struct ifnet *, int);
572 static void	wm_stop_locked(struct ifnet *, int);
573 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
574 static void	wm_82547_txfifo_stall(void *);
575 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
576 /* DMA related */
577 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
578 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
579 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
580 static void	wm_init_tx_regs(struct wm_softc *, struct wm_txqueue *);
581 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
582 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
583 static void	wm_init_rx_regs(struct wm_softc *, struct wm_rxqueue *);
584 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
585 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
586 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
587 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
588 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
589 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
590 static void	wm_init_tx_queue(struct wm_softc *, struct wm_txqueue *);
591 static int	wm_init_rx_queue(struct wm_softc *, struct wm_rxqueue *);
592 static int	wm_alloc_txrx_queues(struct wm_softc *);
593 static void	wm_free_txrx_queues(struct wm_softc *);
594 static int	wm_init_txrx_queues(struct wm_softc *);
595 /* Start */
596 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
597     uint32_t *, uint8_t *);
598 static void	wm_start(struct ifnet *);
599 static void	wm_start_locked(struct ifnet *);
600 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txsoft *,
601     uint32_t *, uint32_t *, bool *);
602 static void	wm_nq_start(struct ifnet *);
603 static void	wm_nq_start_locked(struct ifnet *);
604 /* Interrupt */
605 static int	wm_txeof(struct wm_softc *);
606 static void	wm_rxeof(struct wm_rxqueue *);
607 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
608 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
609 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
610 static void	wm_linkintr(struct wm_softc *, uint32_t);
611 static int	wm_intr_legacy(void *);
612 static int	wm_txintr_msix(void *);
613 static int	wm_rxintr_msix(void *);
614 static int	wm_linkintr_msix(void *);
615 
616 /*
617  * Media related.
618  * GMII, SGMII, TBI, SERDES and SFP.
619  */
620 /* Common */
621 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
622 /* GMII related */
623 static void	wm_gmii_reset(struct wm_softc *);
624 static int	wm_get_phy_id_82575(struct wm_softc *);
625 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
626 static int	wm_gmii_mediachange(struct ifnet *);
627 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
628 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
629 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
630 static int	wm_gmii_i82543_readreg(device_t, int, int);
631 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
632 static int	wm_gmii_i82544_readreg(device_t, int, int);
633 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
634 static int	wm_gmii_i80003_readreg(device_t, int, int);
635 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
636 static int	wm_gmii_bm_readreg(device_t, int, int);
637 static void	wm_gmii_bm_writereg(device_t, int, int, int);
638 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
639 static int	wm_gmii_hv_readreg(device_t, int, int);
640 static void	wm_gmii_hv_writereg(device_t, int, int, int);
641 static int	wm_gmii_82580_readreg(device_t, int, int);
642 static void	wm_gmii_82580_writereg(device_t, int, int, int);
643 static int	wm_gmii_gs40g_readreg(device_t, int, int);
644 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
645 static void	wm_gmii_statchg(struct ifnet *);
646 static int	wm_kmrn_readreg(struct wm_softc *, int);
647 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
648 /* SGMII */
649 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
650 static int	wm_sgmii_readreg(device_t, int, int);
651 static void	wm_sgmii_writereg(device_t, int, int, int);
652 /* TBI related */
653 static void	wm_tbi_mediainit(struct wm_softc *);
654 static int	wm_tbi_mediachange(struct ifnet *);
655 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
656 static int	wm_check_for_link(struct wm_softc *);
657 static void	wm_tbi_tick(struct wm_softc *);
658 /* SERDES related */
659 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
660 static int	wm_serdes_mediachange(struct ifnet *);
661 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
662 static void	wm_serdes_tick(struct wm_softc *);
663 /* SFP related */
664 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
665 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
666 
667 /*
668  * NVM related.
669  * Microwire, SPI (w/wo EERD) and Flash.
670  */
671 /* Misc functions */
672 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
673 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
674 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
675 /* Microwire */
676 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
677 /* SPI */
678 static int	wm_nvm_ready_spi(struct wm_softc *);
679 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
680 /* Reading with EERD */
681 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
682 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
683 /* Flash */
684 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
685     unsigned int *);
686 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
687 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
688 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
689 	uint16_t *);
690 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
691 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
692 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
693 /* iNVM */
694 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
695 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
696 /* Locking, NVM type detection, checksum validation and reading */
697 static int	wm_nvm_acquire(struct wm_softc *);
698 static void	wm_nvm_release(struct wm_softc *);
699 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
700 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
701 static int	wm_nvm_validate_checksum(struct wm_softc *);
702 static void	wm_nvm_version_invm(struct wm_softc *);
703 static void	wm_nvm_version(struct wm_softc *);
704 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
705 
706 /*
707  * Hardware semaphores.
708  * Very complex...
709  */
710 static int	wm_get_swsm_semaphore(struct wm_softc *);
711 static void	wm_put_swsm_semaphore(struct wm_softc *);
712 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
713 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
714 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
715 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
716 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
717 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
718 
719 /*
720  * Management mode and power management related subroutines.
721  * BMC, AMT, suspend/resume and EEE.
722  */
723 #ifdef WM_WOL
724 static int	wm_check_mng_mode(struct wm_softc *);
725 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
726 static int	wm_check_mng_mode_82574(struct wm_softc *);
727 static int	wm_check_mng_mode_generic(struct wm_softc *);
728 #endif
729 static int	wm_enable_mng_pass_thru(struct wm_softc *);
730 static bool	wm_phy_resetisblocked(struct wm_softc *);
731 static void	wm_get_hw_control(struct wm_softc *);
732 static void	wm_release_hw_control(struct wm_softc *);
733 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, int);
734 static void	wm_smbustopci(struct wm_softc *);
735 static void	wm_init_manageability(struct wm_softc *);
736 static void	wm_release_manageability(struct wm_softc *);
737 static void	wm_get_wakeup(struct wm_softc *);
738 #ifdef WM_WOL
739 static void	wm_enable_phy_wakeup(struct wm_softc *);
740 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
741 static void	wm_enable_wakeup(struct wm_softc *);
742 #endif
743 /* LPLU (Low Power Link Up) */
744 static void	wm_lplu_d0_disable(struct wm_softc *);
745 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
746 /* EEE */
747 static void	wm_set_eee_i350(struct wm_softc *);
748 
749 /*
750  * Workarounds (mainly PHY related).
751  * Basically, PHY-specific workarounds live in the PHY drivers.
752  */
753 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
754 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
755 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
756 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
757 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
758 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
759 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
760 static void	wm_reset_init_script_82575(struct wm_softc *);
761 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
762 static void	wm_pll_workaround_i210(struct wm_softc *);
763 
764 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
765     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
766 
767 /*
768  * Devices supported by this driver.
769  */
770 static const struct wm_product {
771 	pci_vendor_id_t		wmp_vendor;
772 	pci_product_id_t	wmp_product;
773 	const char		*wmp_name;
774 	wm_chip_type		wmp_type;
775 	uint32_t		wmp_flags;
776 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
777 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
778 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
779 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
780 #define WMP_MEDIATYPE(x)	((x) & 0x03)
781 } wm_products[] = {
782 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
783 	  "Intel i82542 1000BASE-X Ethernet",
784 	  WM_T_82542_2_1,	WMP_F_FIBER },
785 
786 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
787 	  "Intel i82543GC 1000BASE-X Ethernet",
788 	  WM_T_82543,		WMP_F_FIBER },
789 
790 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
791 	  "Intel i82543GC 1000BASE-T Ethernet",
792 	  WM_T_82543,		WMP_F_COPPER },
793 
794 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
795 	  "Intel i82544EI 1000BASE-T Ethernet",
796 	  WM_T_82544,		WMP_F_COPPER },
797 
798 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
799 	  "Intel i82544EI 1000BASE-X Ethernet",
800 	  WM_T_82544,		WMP_F_FIBER },
801 
802 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
803 	  "Intel i82544GC 1000BASE-T Ethernet",
804 	  WM_T_82544,		WMP_F_COPPER },
805 
806 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
807 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
808 	  WM_T_82544,		WMP_F_COPPER },
809 
810 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
811 	  "Intel i82540EM 1000BASE-T Ethernet",
812 	  WM_T_82540,		WMP_F_COPPER },
813 
814 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
815 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
816 	  WM_T_82540,		WMP_F_COPPER },
817 
818 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
819 	  "Intel i82540EP 1000BASE-T Ethernet",
820 	  WM_T_82540,		WMP_F_COPPER },
821 
822 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
823 	  "Intel i82540EP 1000BASE-T Ethernet",
824 	  WM_T_82540,		WMP_F_COPPER },
825 
826 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
827 	  "Intel i82540EP 1000BASE-T Ethernet",
828 	  WM_T_82540,		WMP_F_COPPER },
829 
830 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
831 	  "Intel i82545EM 1000BASE-T Ethernet",
832 	  WM_T_82545,		WMP_F_COPPER },
833 
834 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
835 	  "Intel i82545GM 1000BASE-T Ethernet",
836 	  WM_T_82545_3,		WMP_F_COPPER },
837 
838 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
839 	  "Intel i82545GM 1000BASE-X Ethernet",
840 	  WM_T_82545_3,		WMP_F_FIBER },
841 
842 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
843 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
844 	  WM_T_82545_3,		WMP_F_SERDES },
845 
846 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
847 	  "Intel i82546EB 1000BASE-T Ethernet",
848 	  WM_T_82546,		WMP_F_COPPER },
849 
850 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
851 	  "Intel i82546EB 1000BASE-T Ethernet",
852 	  WM_T_82546,		WMP_F_COPPER },
853 
854 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
855 	  "Intel i82545EM 1000BASE-X Ethernet",
856 	  WM_T_82545,		WMP_F_FIBER },
857 
858 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
859 	  "Intel i82546EB 1000BASE-X Ethernet",
860 	  WM_T_82546,		WMP_F_FIBER },
861 
862 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
863 	  "Intel i82546GB 1000BASE-T Ethernet",
864 	  WM_T_82546_3,		WMP_F_COPPER },
865 
866 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
867 	  "Intel i82546GB 1000BASE-X Ethernet",
868 	  WM_T_82546_3,		WMP_F_FIBER },
869 
870 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
871 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
872 	  WM_T_82546_3,		WMP_F_SERDES },
873 
874 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
875 	  "i82546GB quad-port Gigabit Ethernet",
876 	  WM_T_82546_3,		WMP_F_COPPER },
877 
878 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
879 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
880 	  WM_T_82546_3,		WMP_F_COPPER },
881 
882 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
883 	  "Intel PRO/1000MT (82546GB)",
884 	  WM_T_82546_3,		WMP_F_COPPER },
885 
886 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
887 	  "Intel i82541EI 1000BASE-T Ethernet",
888 	  WM_T_82541,		WMP_F_COPPER },
889 
890 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
891 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
892 	  WM_T_82541,		WMP_F_COPPER },
893 
894 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
895 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
896 	  WM_T_82541,		WMP_F_COPPER },
897 
898 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
899 	  "Intel i82541ER 1000BASE-T Ethernet",
900 	  WM_T_82541_2,		WMP_F_COPPER },
901 
902 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
903 	  "Intel i82541GI 1000BASE-T Ethernet",
904 	  WM_T_82541_2,		WMP_F_COPPER },
905 
906 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
907 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
908 	  WM_T_82541_2,		WMP_F_COPPER },
909 
910 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
911 	  "Intel i82541PI 1000BASE-T Ethernet",
912 	  WM_T_82541_2,		WMP_F_COPPER },
913 
914 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
915 	  "Intel i82547EI 1000BASE-T Ethernet",
916 	  WM_T_82547,		WMP_F_COPPER },
917 
918 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
919 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
920 	  WM_T_82547,		WMP_F_COPPER },
921 
922 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
923 	  "Intel i82547GI 1000BASE-T Ethernet",
924 	  WM_T_82547_2,		WMP_F_COPPER },
925 
926 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
927 	  "Intel PRO/1000 PT (82571EB)",
928 	  WM_T_82571,		WMP_F_COPPER },
929 
930 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
931 	  "Intel PRO/1000 PF (82571EB)",
932 	  WM_T_82571,		WMP_F_FIBER },
933 
934 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
935 	  "Intel PRO/1000 PB (82571EB)",
936 	  WM_T_82571,		WMP_F_SERDES },
937 
938 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
939 	  "Intel PRO/1000 QT (82571EB)",
940 	  WM_T_82571,		WMP_F_COPPER },
941 
942 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
943 	  "Intel PRO/1000 PT Quad Port Server Adapter",
944 	  WM_T_82571,		WMP_F_COPPER, },
945 
946 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
947 	  "Intel Gigabit PT Quad Port Server ExpressModule",
948 	  WM_T_82571,		WMP_F_COPPER, },
949 
950 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
951 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
952 	  WM_T_82571,		WMP_F_SERDES, },
953 
954 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
955 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
956 	  WM_T_82571,		WMP_F_SERDES, },
957 
958 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
959 	  "Intel 82571EB Quad 1000baseX Ethernet",
960 	  WM_T_82571,		WMP_F_FIBER, },
961 
962 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
963 	  "Intel i82572EI 1000baseT Ethernet",
964 	  WM_T_82572,		WMP_F_COPPER },
965 
966 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
967 	  "Intel i82572EI 1000baseX Ethernet",
968 	  WM_T_82572,		WMP_F_FIBER },
969 
970 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
971 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
972 	  WM_T_82572,		WMP_F_SERDES },
973 
974 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
975 	  "Intel i82572EI 1000baseT Ethernet",
976 	  WM_T_82572,		WMP_F_COPPER },
977 
978 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
979 	  "Intel i82573E",
980 	  WM_T_82573,		WMP_F_COPPER },
981 
982 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
983 	  "Intel i82573E IAMT",
984 	  WM_T_82573,		WMP_F_COPPER },
985 
986 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
987 	  "Intel i82573L Gigabit Ethernet",
988 	  WM_T_82573,		WMP_F_COPPER },
989 
990 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
991 	  "Intel i82574L",
992 	  WM_T_82574,		WMP_F_COPPER },
993 
994 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
995 	  "Intel i82574L",
996 	  WM_T_82574,		WMP_F_COPPER },
997 
998 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
999 	  "Intel i82583V",
1000 	  WM_T_82583,		WMP_F_COPPER },
1001 
1002 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1003 	  "i80003 dual 1000baseT Ethernet",
1004 	  WM_T_80003,		WMP_F_COPPER },
1005 
1006 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1007 	  "i80003 dual 1000baseX Ethernet",
1008 	  WM_T_80003,		WMP_F_COPPER },
1009 
1010 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1011 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1012 	  WM_T_80003,		WMP_F_SERDES },
1013 
1014 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1015 	  "Intel i80003 1000baseT Ethernet",
1016 	  WM_T_80003,		WMP_F_COPPER },
1017 
1018 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1019 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1020 	  WM_T_80003,		WMP_F_SERDES },
1021 
1022 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1023 	  "Intel i82801H (M_AMT) LAN Controller",
1024 	  WM_T_ICH8,		WMP_F_COPPER },
1025 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1026 	  "Intel i82801H (AMT) LAN Controller",
1027 	  WM_T_ICH8,		WMP_F_COPPER },
1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1029 	  "Intel i82801H LAN Controller",
1030 	  WM_T_ICH8,		WMP_F_COPPER },
1031 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1032 	  "Intel i82801H (IFE) LAN Controller",
1033 	  WM_T_ICH8,		WMP_F_COPPER },
1034 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1035 	  "Intel i82801H (M) LAN Controller",
1036 	  WM_T_ICH8,		WMP_F_COPPER },
1037 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1038 	  "Intel i82801H IFE (GT) LAN Controller",
1039 	  WM_T_ICH8,		WMP_F_COPPER },
1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1041 	  "Intel i82801H IFE (G) LAN Controller",
1042 	  WM_T_ICH8,		WMP_F_COPPER },
1043 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1044 	  "82801I (AMT) LAN Controller",
1045 	  WM_T_ICH9,		WMP_F_COPPER },
1046 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1047 	  "82801I LAN Controller",
1048 	  WM_T_ICH9,		WMP_F_COPPER },
1049 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1050 	  "82801I (G) LAN Controller",
1051 	  WM_T_ICH9,		WMP_F_COPPER },
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1053 	  "82801I (GT) LAN Controller",
1054 	  WM_T_ICH9,		WMP_F_COPPER },
1055 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1056 	  "82801I (C) LAN Controller",
1057 	  WM_T_ICH9,		WMP_F_COPPER },
1058 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1059 	  "82801I mobile LAN Controller",
1060 	  WM_T_ICH9,		WMP_F_COPPER },
1061 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1062 	  "82801I mobile (V) LAN Controller",
1063 	  WM_T_ICH9,		WMP_F_COPPER },
1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1065 	  "82801I mobile (AMT) LAN Controller",
1066 	  WM_T_ICH9,		WMP_F_COPPER },
1067 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1068 	  "82567LM-4 LAN Controller",
1069 	  WM_T_ICH9,		WMP_F_COPPER },
1070 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
1071 	  "82567V-3 LAN Controller",
1072 	  WM_T_ICH9,		WMP_F_COPPER },
1073 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1074 	  "82567LM-2 LAN Controller",
1075 	  WM_T_ICH10,		WMP_F_COPPER },
1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1077 	  "82567LF-2 LAN Controller",
1078 	  WM_T_ICH10,		WMP_F_COPPER },
1079 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1080 	  "82567LM-3 LAN Controller",
1081 	  WM_T_ICH10,		WMP_F_COPPER },
1082 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1083 	  "82567LF-3 LAN Controller",
1084 	  WM_T_ICH10,		WMP_F_COPPER },
1085 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1086 	  "82567V-2 LAN Controller",
1087 	  WM_T_ICH10,		WMP_F_COPPER },
1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1089 	  "82567V-3? LAN Controller",
1090 	  WM_T_ICH10,		WMP_F_COPPER },
1091 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1092 	  "HANKSVILLE LAN Controller",
1093 	  WM_T_ICH10,		WMP_F_COPPER },
1094 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1095 	  "PCH LAN (82577LM) Controller",
1096 	  WM_T_PCH,		WMP_F_COPPER },
1097 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1098 	  "PCH LAN (82577LC) Controller",
1099 	  WM_T_PCH,		WMP_F_COPPER },
1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1101 	  "PCH LAN (82578DM) Controller",
1102 	  WM_T_PCH,		WMP_F_COPPER },
1103 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1104 	  "PCH LAN (82578DC) Controller",
1105 	  WM_T_PCH,		WMP_F_COPPER },
1106 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1107 	  "PCH2 LAN (82579LM) Controller",
1108 	  WM_T_PCH2,		WMP_F_COPPER },
1109 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1110 	  "PCH2 LAN (82579V) Controller",
1111 	  WM_T_PCH2,		WMP_F_COPPER },
1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1113 	  "82575EB dual-1000baseT Ethernet",
1114 	  WM_T_82575,		WMP_F_COPPER },
1115 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1116 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1117 	  WM_T_82575,		WMP_F_SERDES },
1118 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1119 	  "82575GB quad-1000baseT Ethernet",
1120 	  WM_T_82575,		WMP_F_COPPER },
1121 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1122 	  "82575GB quad-1000baseT Ethernet (PM)",
1123 	  WM_T_82575,		WMP_F_COPPER },
1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1125 	  "82576 1000BaseT Ethernet",
1126 	  WM_T_82576,		WMP_F_COPPER },
1127 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1128 	  "82576 1000BaseX Ethernet",
1129 	  WM_T_82576,		WMP_F_FIBER },
1130 
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1132 	  "82576 gigabit Ethernet (SERDES)",
1133 	  WM_T_82576,		WMP_F_SERDES },
1134 
1135 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1136 	  "82576 quad-1000BaseT Ethernet",
1137 	  WM_T_82576,		WMP_F_COPPER },
1138 
1139 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1140 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1141 	  WM_T_82576,		WMP_F_COPPER },
1142 
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1144 	  "82576 gigabit Ethernet",
1145 	  WM_T_82576,		WMP_F_COPPER },
1146 
1147 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1148 	  "82576 gigabit Ethernet (SERDES)",
1149 	  WM_T_82576,		WMP_F_SERDES },
1150 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1151 	  "82576 quad-gigabit Ethernet (SERDES)",
1152 	  WM_T_82576,		WMP_F_SERDES },
1153 
1154 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1155 	  "82580 1000BaseT Ethernet",
1156 	  WM_T_82580,		WMP_F_COPPER },
1157 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1158 	  "82580 1000BaseX Ethernet",
1159 	  WM_T_82580,		WMP_F_FIBER },
1160 
1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1162 	  "82580 1000BaseT Ethernet (SERDES)",
1163 	  WM_T_82580,		WMP_F_SERDES },
1164 
1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1166 	  "82580 gigabit Ethernet (SGMII)",
1167 	  WM_T_82580,		WMP_F_COPPER },
1168 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1169 	  "82580 dual-1000BaseT Ethernet",
1170 	  WM_T_82580,		WMP_F_COPPER },
1171 
1172 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1173 	  "82580 quad-1000BaseX Ethernet",
1174 	  WM_T_82580,		WMP_F_FIBER },
1175 
1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1177 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1178 	  WM_T_82580,		WMP_F_COPPER },
1179 
1180 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1181 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1182 	  WM_T_82580,		WMP_F_SERDES },
1183 
1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1185 	  "DH89XXCC 1000BASE-KX Ethernet",
1186 	  WM_T_82580,		WMP_F_SERDES },
1187 
1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1189 	  "DH89XXCC Gigabit Ethernet (SFP)",
1190 	  WM_T_82580,		WMP_F_SERDES },
1191 
1192 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1193 	  "I350 Gigabit Network Connection",
1194 	  WM_T_I350,		WMP_F_COPPER },
1195 
1196 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1197 	  "I350 Gigabit Fiber Network Connection",
1198 	  WM_T_I350,		WMP_F_FIBER },
1199 
1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1201 	  "I350 Gigabit Backplane Connection",
1202 	  WM_T_I350,		WMP_F_SERDES },
1203 
1204 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1205 	  "I350 Quad Port Gigabit Ethernet",
1206 	  WM_T_I350,		WMP_F_SERDES },
1207 
1208 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1209 	  "I350 Gigabit Connection",
1210 	  WM_T_I350,		WMP_F_COPPER },
1211 
1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1213 	  "I354 Gigabit Ethernet (KX)",
1214 	  WM_T_I354,		WMP_F_SERDES },
1215 
1216 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1217 	  "I354 Gigabit Ethernet (SGMII)",
1218 	  WM_T_I354,		WMP_F_COPPER },
1219 
1220 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1221 	  "I354 Gigabit Ethernet (2.5G)",
1222 	  WM_T_I354,		WMP_F_COPPER },
1223 
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1225 	  "I210-T1 Ethernet Server Adapter",
1226 	  WM_T_I210,		WMP_F_COPPER },
1227 
1228 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1229 	  "I210 Ethernet (Copper OEM)",
1230 	  WM_T_I210,		WMP_F_COPPER },
1231 
1232 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1233 	  "I210 Ethernet (Copper IT)",
1234 	  WM_T_I210,		WMP_F_COPPER },
1235 
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1237 	  "I210 Ethernet (FLASH less)",
1238 	  WM_T_I210,		WMP_F_COPPER },
1239 
1240 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1241 	  "I210 Gigabit Ethernet (Fiber)",
1242 	  WM_T_I210,		WMP_F_FIBER },
1243 
1244 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1245 	  "I210 Gigabit Ethernet (SERDES)",
1246 	  WM_T_I210,		WMP_F_SERDES },
1247 
1248 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1249 	  "I210 Gigabit Ethernet (FLASH less)",
1250 	  WM_T_I210,		WMP_F_SERDES },
1251 
1252 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1253 	  "I210 Gigabit Ethernet (SGMII)",
1254 	  WM_T_I210,		WMP_F_COPPER },
1255 
1256 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1257 	  "I211 Ethernet (COPPER)",
1258 	  WM_T_I211,		WMP_F_COPPER },
1259 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1260 	  "I217 V Ethernet Connection",
1261 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1263 	  "I217 LM Ethernet Connection",
1264 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1266 	  "I218 V Ethernet Connection",
1267 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1269 	  "I218 V Ethernet Connection",
1270 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1271 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1272 	  "I218 V Ethernet Connection",
1273 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1275 	  "I218 LM Ethernet Connection",
1276 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1277 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1278 	  "I218 LM Ethernet Connection",
1279 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1280 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1281 	  "I218 LM Ethernet Connection",
1282 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1283 	{ 0,			0,
1284 	  NULL,
1285 	  0,			0 },
1286 };
1287 
1288 #ifdef WM_EVENT_COUNTERS
1289 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1290 #endif /* WM_EVENT_COUNTERS */
1291 
1292 
1293 /*
1294  * Register read/write functions.
1295  * Other than CSR_{READ|WRITE}().
1296  */
1297 
1298 #if 0 /* Not currently used */
1299 static inline uint32_t
1300 wm_io_read(struct wm_softc *sc, int reg)
1301 {
1302 
1303 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1304 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1305 }
1306 #endif
1307 
1308 static inline void
1309 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1310 {
1311 
1312 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1313 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1314 }
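
/*
 * Editor's note: wm_io_read()/wm_io_write() go through the chip's
 * I/O-mapped register window: the register offset is first written at
 * I/O offset 0, then the data is transferred at I/O offset 4.
 */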
1315 
1316 static inline void
1317 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1318     uint32_t data)
1319 {
1320 	uint32_t regval;
1321 	int i;
1322 
1323 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1324 
1325 	CSR_WRITE(sc, reg, regval);
1326 
1327 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1328 		delay(5);
1329 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1330 			break;
1331 	}
1332 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1333 		aprint_error("%s: WARNING:"
1334 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1335 		    device_xname(sc->sc_dev), reg);
1336 	}
1337 }
1338 
1339 static inline void
1340 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1341 {
1342 	wa->wa_low = htole32(v & 0xffffffffU);
1343 	if (sizeof(bus_addr_t) == 8)
1344 		wa->wa_high = htole32((uint64_t) v >> 32);
1345 	else
1346 		wa->wa_high = 0;
1347 }
1348 
1349 /*
1350  * Descriptor sync/init functions.
1351  */
1352 static inline void
1353 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1354 {
1355 	struct wm_softc *sc = txq->txq_sc;
1356 
1357 	/* If it will wrap around, sync to the end of the ring. */
1358 	if ((start + num) > WM_NTXDESC(txq)) {
1359 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1360 		    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) *
1361 		    (WM_NTXDESC(txq) - start), ops);
1362 		num -= (WM_NTXDESC(txq) - start);
1363 		start = 0;
1364 	}
1365 
1366 	/* Now sync whatever is left. */
1367 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1368 	    WM_CDTXOFF(start), sizeof(wiseman_txdesc_t) * num, ops);
1369 }
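
/*
 * Editor's example: with a hypothetical 4096-entry ring,
 * wm_cdtxsync(txq, 4090, 10, ops) syncs descriptors 4090-4095 in the
 * first bus_dmamap_sync() call and descriptors 0-3 in the second.
 */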
1370 
1371 static inline void
1372 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1373 {
1374 	struct wm_softc *sc = rxq->rxq_sc;
1375 
1376 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1377 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1378 }
1379 
1380 static inline void
1381 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1382 {
1383 	struct wm_softc *sc = rxq->rxq_sc;
1384 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1385 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1386 	struct mbuf *m = rxs->rxs_mbuf;
1387 
1388 	/*
1389 	 * Note: We scoot the packet forward 2 bytes in the buffer
1390 	 * so that the payload after the Ethernet header is aligned
1391 	 * to a 4-byte boundary.
1392 	 *
1393 	 * XXX BRAINDAMAGE ALERT!
1394 	 * The stupid chip uses the same size for every buffer, which
1395 	 * is set in the Receive Control register.  We are using the 2K
1396 	 * size option, but what we REALLY want is (2K - 2)!  For this
1397 	 * reason, we can't "scoot" packets longer than the standard
1398 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1399 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1400 	 * the upper layer copy the headers.
1401 	 */
1402 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1403 
1404 	wm_set_dma_addr(&rxd->wrx_addr,
1405 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1406 	rxd->wrx_len = 0;
1407 	rxd->wrx_cksum = 0;
1408 	rxd->wrx_status = 0;
1409 	rxd->wrx_errors = 0;
1410 	rxd->wrx_special = 0;
1411 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1412 
1413 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1414 }
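
/*
 * Editor's example: with sc_align_tweak == 2, the 14-byte Ethernet
 * header starts at buffer offset 2, so the payload begins at offset
 * 16 and is 4-byte aligned, as the comment above describes.
 */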
1415 
1416 /*
1417  * Device driver interface functions and commonly used functions.
1418  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1419  */
1420 
1421 /* Look up the device in the supported-device table */
1422 static const struct wm_product *
1423 wm_lookup(const struct pci_attach_args *pa)
1424 {
1425 	const struct wm_product *wmp;
1426 
1427 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1428 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1429 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1430 			return wmp;
1431 	}
1432 	return NULL;
1433 }
1434 
1435 /* The match function (ca_match) */
1436 static int
1437 wm_match(device_t parent, cfdata_t cf, void *aux)
1438 {
1439 	struct pci_attach_args *pa = aux;
1440 
1441 	if (wm_lookup(pa) != NULL)
1442 		return 1;
1443 
1444 	return 0;
1445 }
1446 
1447 /* The attach function (ca_attach) */
1448 static void
1449 wm_attach(device_t parent, device_t self, void *aux)
1450 {
1451 	struct wm_softc *sc = device_private(self);
1452 	struct pci_attach_args *pa = aux;
1453 	prop_dictionary_t dict;
1454 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1455 	pci_chipset_tag_t pc = pa->pa_pc;
1456 	int counts[PCI_INTR_TYPE_SIZE];
1457 	pci_intr_type_t max_type;
1458 	const char *eetype, *xname;
1459 	bus_space_tag_t memt;
1460 	bus_space_handle_t memh;
1461 	bus_size_t memsize;
1462 	int memh_valid;
1463 	int i, error;
1464 	const struct wm_product *wmp;
1465 	prop_data_t ea;
1466 	prop_number_t pn;
1467 	uint8_t enaddr[ETHER_ADDR_LEN];
1468 	uint16_t cfg1, cfg2, swdpin, nvmword;
1469 	pcireg_t preg, memtype;
1470 	uint16_t eeprom_data, apme_mask;
1471 	bool force_clear_smbi;
1472 	uint32_t link_mode;
1473 	uint32_t reg;
1474 
1475 	sc->sc_dev = self;
1476 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1477 	sc->sc_stopping = false;
1478 
1479 	wmp = wm_lookup(pa);
1480 #ifdef DIAGNOSTIC
1481 	if (wmp == NULL) {
1482 		printf("\n");
1483 		panic("wm_attach: impossible");
1484 	}
1485 #endif
1486 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1487 
1488 	sc->sc_pc = pa->pa_pc;
1489 	sc->sc_pcitag = pa->pa_tag;
1490 
1491 	if (pci_dma64_available(pa))
1492 		sc->sc_dmat = pa->pa_dmat64;
1493 	else
1494 		sc->sc_dmat = pa->pa_dmat;
1495 
1496 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1497 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
1498 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1499 
1500 	sc->sc_type = wmp->wmp_type;
1501 	if (sc->sc_type < WM_T_82543) {
1502 		if (sc->sc_rev < 2) {
1503 			aprint_error_dev(sc->sc_dev,
1504 			    "i82542 must be at least rev. 2\n");
1505 			return;
1506 		}
1507 		if (sc->sc_rev < 3)
1508 			sc->sc_type = WM_T_82542_2_0;
1509 	}
1510 
1511 	/*
1512 	 * Disable MSI for Errata:
1513 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1514 	 *
1515 	 *  82544: Errata 25
1516 	 *  82540: Errata  6 (easy to reproduce device timeout)
1517 	 *  82545: Errata  4 (easy to reproduce device timeout)
1518 	 *  82546: Errata 26 (easy to reproduce device timeout)
1519 	 *  82541: Errata  7 (easy to reproduce device timeout)
1520 	 *
1521 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1522 	 *
1523 	 *  82571 & 82572: Errata 63
1524 	 */
1525 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1526 	    || (sc->sc_type == WM_T_82572))
1527 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1528 
1529 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1530 	    || (sc->sc_type == WM_T_82580)
1531 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1532 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1533 		sc->sc_flags |= WM_F_NEWQUEUE;
1534 
1535 	/* Set device properties (mactype) */
1536 	dict = device_properties(sc->sc_dev);
1537 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1538 
1539 	/*
1540 	 * Map the device.  All devices support memory-mapped access,
1541 	 * and it is really required for normal operation.
1542 	 */
1543 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1544 	switch (memtype) {
1545 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1546 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1547 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1548 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1549 		break;
1550 	default:
1551 		memh_valid = 0;
1552 		break;
1553 	}
1554 
1555 	if (memh_valid) {
1556 		sc->sc_st = memt;
1557 		sc->sc_sh = memh;
1558 		sc->sc_ss = memsize;
1559 	} else {
1560 		aprint_error_dev(sc->sc_dev,
1561 		    "unable to map device registers\n");
1562 		return;
1563 	}
1564 
1565 	/*
1566 	 * In addition, i82544 and later support I/O mapped indirect
1567 	 * register access.  It is not desirable (nor supported in
1568 	 * this driver) to use it for normal operation, though it is
1569 	 * required to work around bugs in some chip versions.
1570 	 */
1571 	if (sc->sc_type >= WM_T_82544) {
1572 		/* First we have to find the I/O BAR. */
1573 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1574 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1575 			if (memtype == PCI_MAPREG_TYPE_IO)
1576 				break;
1577 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1578 			    PCI_MAPREG_MEM_TYPE_64BIT)
1579 				i += 4;	/* skip high bits, too */
1580 		}
1581 		if (i < PCI_MAPREG_END) {
1582 			/*
1583 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1584 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1585 			 * That's not a problem, because those newer chips
1586 			 * don't have this bug.
1587 			 *
1588 			 * The i8254x apparently doesn't respond when the
1589 			 * I/O BAR is 0, which looks somewhat like it hasn't
1590 			 * been configured.
1591 			 */
1592 			preg = pci_conf_read(pc, pa->pa_tag, i);
1593 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1594 				aprint_error_dev(sc->sc_dev,
1595 				    "WARNING: I/O BAR at zero.\n");
1596 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1597 					0, &sc->sc_iot, &sc->sc_ioh,
1598 					NULL, &sc->sc_ios) == 0) {
1599 				sc->sc_flags |= WM_F_IOH_VALID;
1600 			} else {
1601 				aprint_error_dev(sc->sc_dev,
1602 				    "WARNING: unable to map I/O space\n");
1603 			}
1604 		}
1605 
1606 	}
1607 
1608 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1609 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1610 	preg |= PCI_COMMAND_MASTER_ENABLE;
1611 	if (sc->sc_type < WM_T_82542_2_1)
1612 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1613 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1614 
1615 	/* power up chip */
1616 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1617 	    NULL)) && error != EOPNOTSUPP) {
1618 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1619 		return;
1620 	}
1621 
1622 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1623 
1624 	/* Allocation settings */
1625 	max_type = PCI_INTR_TYPE_MSIX;
1626 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
1627 	counts[PCI_INTR_TYPE_MSI] = 1;
1628 	counts[PCI_INTR_TYPE_INTX] = 1;
1629 
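	/*
	 * Try MSI-X first; on failure fall back to MSI, and finally
	 * to INTx, releasing the vectors allocated at each step.
	 */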
1630 alloc_retry:
1631 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1632 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1633 		return;
1634 	}
1635 
1636 	if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1637 		error = wm_setup_msix(sc);
1638 		if (error) {
1639 			pci_intr_release(pc, sc->sc_intrs,
1640 			    counts[PCI_INTR_TYPE_MSIX]);
1641 
1642 			/* Setup for MSI: Disable MSI-X */
1643 			max_type = PCI_INTR_TYPE_MSI;
1644 			counts[PCI_INTR_TYPE_MSI] = 1;
1645 			counts[PCI_INTR_TYPE_INTX] = 1;
1646 			goto alloc_retry;
1647 		}
1648 	} else if (pci_intr_type(sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1649 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1650 		error = wm_setup_legacy(sc);
1651 		if (error) {
1652 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1653 			    counts[PCI_INTR_TYPE_MSI]);
1654 
1655 			/* The next try is for INTx: Disable MSI */
1656 			max_type = PCI_INTR_TYPE_INTX;
1657 			counts[PCI_INTR_TYPE_INTX] = 1;
1658 			goto alloc_retry;
1659 		}
1660 	} else {
1661 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1662 		error = wm_setup_legacy(sc);
1663 		if (error) {
1664 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1665 			    counts[PCI_INTR_TYPE_INTX]);
1666 			return;
1667 		}
1668 	}
1669 
1670 	/*
1671 	 * Check the function ID (unit number of the chip).
1672 	 */
1673 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1674 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1675 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1676 	    || (sc->sc_type == WM_T_82580)
1677 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1678 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1679 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1680 	else
1681 		sc->sc_funcid = 0;
1682 
1683 	/*
1684 	 * Determine a few things about the bus we're connected to.
1685 	 */
1686 	if (sc->sc_type < WM_T_82543) {
1687 		/* We don't really know the bus characteristics here. */
1688 		sc->sc_bus_speed = 33;
1689 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1690 		/*
1691 		 * CSA (Communication Streaming Architecture) is about as fast
1692 		 * as a 32-bit 66MHz PCI bus.
1693 		 */
1694 		sc->sc_flags |= WM_F_CSA;
1695 		sc->sc_bus_speed = 66;
1696 		aprint_verbose_dev(sc->sc_dev,
1697 		    "Communication Streaming Architecture\n");
1698 		if (sc->sc_type == WM_T_82547) {
1699 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1700 			callout_setfunc(&sc->sc_txfifo_ch,
1701 					wm_82547_txfifo_stall, sc);
1702 			aprint_verbose_dev(sc->sc_dev,
1703 			    "using 82547 Tx FIFO stall work-around\n");
1704 		}
1705 	} else if (sc->sc_type >= WM_T_82571) {
1706 		sc->sc_flags |= WM_F_PCIE;
1707 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1708 		    && (sc->sc_type != WM_T_ICH10)
1709 		    && (sc->sc_type != WM_T_PCH)
1710 		    && (sc->sc_type != WM_T_PCH2)
1711 		    && (sc->sc_type != WM_T_PCH_LPT)) {
1712 			/* ICH* and PCH* have no PCIe capability registers */
1713 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1714 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1715 				NULL) == 0)
1716 				aprint_error_dev(sc->sc_dev,
1717 				    "unable to find PCIe capability\n");
1718 		}
1719 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1720 	} else {
1721 		reg = CSR_READ(sc, WMREG_STATUS);
1722 		if (reg & STATUS_BUS64)
1723 			sc->sc_flags |= WM_F_BUS64;
1724 		if ((reg & STATUS_PCIX_MODE) != 0) {
1725 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1726 
1727 			sc->sc_flags |= WM_F_PCIX;
1728 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1729 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1730 				aprint_error_dev(sc->sc_dev,
1731 				    "unable to find PCIX capability\n");
1732 			else if (sc->sc_type != WM_T_82545_3 &&
1733 				 sc->sc_type != WM_T_82546_3) {
1734 				/*
1735 				 * Work around a problem caused by the BIOS
1736 				 * setting the max memory read byte count
1737 				 * incorrectly.
1738 				 */
1739 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1740 				    sc->sc_pcixe_capoff + PCIX_CMD);
1741 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1742 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1743 
1744 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1745 				    PCIX_CMD_BYTECNT_SHIFT;
1746 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1747 				    PCIX_STATUS_MAXB_SHIFT;
1748 				if (bytecnt > maxb) {
1749 					aprint_verbose_dev(sc->sc_dev,
1750 					    "resetting PCI-X MMRBC: %d -> %d\n",
1751 					    512 << bytecnt, 512 << maxb);
1752 					pcix_cmd = (pcix_cmd &
1753 					    ~PCIX_CMD_BYTECNT_MASK) |
1754 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1755 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1756 					    sc->sc_pcixe_capoff + PCIX_CMD,
1757 					    pcix_cmd);
1758 				}
1759 			}
1760 		}
1761 		/*
1762 		 * The quad port adapter is special; it has a PCIX-PCIX
1763 		 * bridge on the board, and can run the secondary bus at
1764 		 * a higher speed.
1765 		 */
1766 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1767 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1768 								      : 66;
1769 		} else if (sc->sc_flags & WM_F_PCIX) {
1770 			switch (reg & STATUS_PCIXSPD_MASK) {
1771 			case STATUS_PCIXSPD_50_66:
1772 				sc->sc_bus_speed = 66;
1773 				break;
1774 			case STATUS_PCIXSPD_66_100:
1775 				sc->sc_bus_speed = 100;
1776 				break;
1777 			case STATUS_PCIXSPD_100_133:
1778 				sc->sc_bus_speed = 133;
1779 				break;
1780 			default:
1781 				aprint_error_dev(sc->sc_dev,
1782 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1783 				    reg & STATUS_PCIXSPD_MASK);
1784 				sc->sc_bus_speed = 66;
1785 				break;
1786 			}
1787 		} else
1788 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1789 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1790 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1791 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1792 	}
1793 
1794 	/* clear interesting stat counters */
1795 	CSR_READ(sc, WMREG_COLC);
1796 	CSR_READ(sc, WMREG_RXERRC);
1797 
1798 	/* get PHY control from SMBus to PCIe */
1799 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1800 	    || (sc->sc_type == WM_T_PCH_LPT))
1801 		wm_smbustopci(sc);
1802 
1803 	/* Reset the chip to a known state. */
1804 	wm_reset(sc);
1805 
1806 	/* Get some information about the EEPROM. */
1807 	switch (sc->sc_type) {
1808 	case WM_T_82542_2_0:
1809 	case WM_T_82542_2_1:
1810 	case WM_T_82543:
1811 	case WM_T_82544:
1812 		/* Microwire */
1813 		sc->sc_nvm_wordsize = 64;
1814 		sc->sc_nvm_addrbits = 6;
1815 		break;
1816 	case WM_T_82540:
1817 	case WM_T_82545:
1818 	case WM_T_82545_3:
1819 	case WM_T_82546:
1820 	case WM_T_82546_3:
1821 		/* Microwire */
1822 		reg = CSR_READ(sc, WMREG_EECD);
1823 		if (reg & EECD_EE_SIZE) {
1824 			sc->sc_nvm_wordsize = 256;
1825 			sc->sc_nvm_addrbits = 8;
1826 		} else {
1827 			sc->sc_nvm_wordsize = 64;
1828 			sc->sc_nvm_addrbits = 6;
1829 		}
1830 		sc->sc_flags |= WM_F_LOCK_EECD;
1831 		break;
1832 	case WM_T_82541:
1833 	case WM_T_82541_2:
1834 	case WM_T_82547:
1835 	case WM_T_82547_2:
1836 		sc->sc_flags |= WM_F_LOCK_EECD;
1837 		reg = CSR_READ(sc, WMREG_EECD);
1838 		if (reg & EECD_EE_TYPE) {
1839 			/* SPI */
1840 			sc->sc_flags |= WM_F_EEPROM_SPI;
1841 			wm_nvm_set_addrbits_size_eecd(sc);
1842 		} else {
1843 			/* Microwire */
1844 			if ((reg & EECD_EE_ABITS) != 0) {
1845 				sc->sc_nvm_wordsize = 256;
1846 				sc->sc_nvm_addrbits = 8;
1847 			} else {
1848 				sc->sc_nvm_wordsize = 64;
1849 				sc->sc_nvm_addrbits = 6;
1850 			}
1851 		}
1852 		break;
1853 	case WM_T_82571:
1854 	case WM_T_82572:
1855 		/* SPI */
1856 		sc->sc_flags |= WM_F_EEPROM_SPI;
1857 		wm_nvm_set_addrbits_size_eecd(sc);
1858 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1859 		break;
1860 	case WM_T_82573:
1861 		sc->sc_flags |= WM_F_LOCK_SWSM;
1862 		/* FALLTHROUGH */
1863 	case WM_T_82574:
1864 	case WM_T_82583:
1865 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1866 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1867 			sc->sc_nvm_wordsize = 2048;
1868 		} else {
1869 			/* SPI */
1870 			sc->sc_flags |= WM_F_EEPROM_SPI;
1871 			wm_nvm_set_addrbits_size_eecd(sc);
1872 		}
1873 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1874 		break;
1875 	case WM_T_82575:
1876 	case WM_T_82576:
1877 	case WM_T_82580:
1878 	case WM_T_I350:
1879 	case WM_T_I354:
1880 	case WM_T_80003:
1881 		/* SPI */
1882 		sc->sc_flags |= WM_F_EEPROM_SPI;
1883 		wm_nvm_set_addrbits_size_eecd(sc);
1884 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1885 		    | WM_F_LOCK_SWSM;
1886 		break;
1887 	case WM_T_ICH8:
1888 	case WM_T_ICH9:
1889 	case WM_T_ICH10:
1890 	case WM_T_PCH:
1891 	case WM_T_PCH2:
1892 	case WM_T_PCH_LPT:
1893 		/* FLASH */
1894 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1895 		sc->sc_nvm_wordsize = 2048;
1896 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_ICH8_FLASH);
1897 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1898 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1899 			aprint_error_dev(sc->sc_dev,
1900 			    "can't map FLASH registers\n");
1901 			goto out;
1902 		}
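		/*
		 * GFPREG holds the first and last sectors of the flash
		 * region; from those, compute the region base and the
		 * per-bank size (two banks) in 16-bit words.
		 */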
1903 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1904 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1905 		    ICH_FLASH_SECTOR_SIZE;
1906 		sc->sc_ich8_flash_bank_size =
1907 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1908 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1909 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1910 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1911 		break;
1912 	case WM_T_I210:
1913 	case WM_T_I211:
1914 		if (wm_nvm_get_flash_presence_i210(sc)) {
1915 			wm_nvm_set_addrbits_size_eecd(sc);
1916 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1917 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1918 		} else {
1919 			sc->sc_nvm_wordsize = INVM_SIZE;
1920 			sc->sc_flags |= WM_F_EEPROM_INVM;
1921 			sc->sc_flags |= WM_F_LOCK_SWFW;
1922 		}
1923 		break;
1924 	default:
1925 		break;
1926 	}
1927 
1928 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
1929 	switch (sc->sc_type) {
1930 	case WM_T_82571:
1931 	case WM_T_82572:
1932 		reg = CSR_READ(sc, WMREG_SWSM2);
1933 		if ((reg & SWSM2_LOCK) == 0) {
1934 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
1935 			force_clear_smbi = true;
1936 		} else
1937 			force_clear_smbi = false;
1938 		break;
1939 	case WM_T_82573:
1940 	case WM_T_82574:
1941 	case WM_T_82583:
1942 		force_clear_smbi = true;
1943 		break;
1944 	default:
1945 		force_clear_smbi = false;
1946 		break;
1947 	}
1948 	if (force_clear_smbi) {
1949 		reg = CSR_READ(sc, WMREG_SWSM);
1950 		if ((reg & SWSM_SMBI) != 0)
1951 			aprint_error_dev(sc->sc_dev,
1952 			    "Please update the Bootagent\n");
1953 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
1954 	}
1955 
1956 	/*
1957 	 * Defer printing the EEPROM type until after verifying the checksum.
1958 	 * This allows the EEPROM type to be printed correctly in the case
1959 	 * that no EEPROM is attached.
1960 	 */
1961 	/*
1962 	 * Validate the EEPROM checksum. If the checksum fails, flag
1963 	 * this for later, so we can fail future reads from the EEPROM.
1964 	 */
1965 	if (wm_nvm_validate_checksum(sc)) {
1966 		/*
1967 		 * Read it again, because some PCI-e parts fail the
1968 		 * first check due to the link being in a sleep state.
1969 		 */
1970 		if (wm_nvm_validate_checksum(sc))
1971 			sc->sc_flags |= WM_F_EEPROM_INVALID;
1972 	}
1973 
1974 	/* Set device properties (macflags) */
1975 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
1976 
1977 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
1978 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
1979 	else {
1980 		aprint_verbose_dev(sc->sc_dev, "%u words ",
1981 		    sc->sc_nvm_wordsize);
1982 		if (sc->sc_flags & WM_F_EEPROM_INVM)
1983 			aprint_verbose("iNVM");
1984 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
1985 			aprint_verbose("FLASH(HW)");
1986 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
1987 			aprint_verbose("FLASH");
1988 		else {
1989 			if (sc->sc_flags & WM_F_EEPROM_SPI)
1990 				eetype = "SPI";
1991 			else
1992 				eetype = "MicroWire";
1993 			aprint_verbose("(%d address bits) %s EEPROM",
1994 			    sc->sc_nvm_addrbits, eetype);
1995 		}
1996 	}
1997 	wm_nvm_version(sc);
1998 	aprint_verbose("\n");
1999 
2000 	/* Check for I21[01] PLL workaround */
2001 	if (sc->sc_type == WM_T_I210)
2002 		sc->sc_flags |= WM_F_PLL_WA_I210;
2003 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2004 		/* NVM image release 3.25 has a workaround */
2005 		if ((sc->sc_nvm_ver_major < 3)
2006 		    || ((sc->sc_nvm_ver_major == 3)
2007 			&& (sc->sc_nvm_ver_minor < 25))) {
2008 			aprint_verbose_dev(sc->sc_dev,
2009 			    "ROM image version %d.%d is older than 3.25\n",
2010 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2011 			sc->sc_flags |= WM_F_PLL_WA_I210;
2012 		}
2013 	}
2014 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2015 		wm_pll_workaround_i210(sc);
2016 
2017 	wm_get_wakeup(sc);
2018 	switch (sc->sc_type) {
2019 	case WM_T_82571:
2020 	case WM_T_82572:
2021 	case WM_T_82573:
2022 	case WM_T_82574:
2023 	case WM_T_82583:
2024 	case WM_T_80003:
2025 	case WM_T_ICH8:
2026 	case WM_T_ICH9:
2027 	case WM_T_ICH10:
2028 	case WM_T_PCH:
2029 	case WM_T_PCH2:
2030 	case WM_T_PCH_LPT:
2031 		/* Non-AMT based hardware can now take control from firmware */
2032 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2033 			wm_get_hw_control(sc);
2034 		break;
2035 	default:
2036 		break;
2037 	}
2038 
2039 	/*
2040 	 * Read the Ethernet address from the EEPROM, unless it was
2041 	 * already found in the device properties.
2042 	 */
2043 	ea = prop_dictionary_get(dict, "mac-address");
2044 	if (ea != NULL) {
2045 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2046 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2047 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2048 	} else {
2049 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2050 			aprint_error_dev(sc->sc_dev,
2051 			    "unable to read Ethernet address\n");
2052 			goto out;
2053 		}
2054 	}
2055 
2056 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2057 	    ether_sprintf(enaddr));
2058 
2059 	/*
2060 	 * Read the config info from the EEPROM, and set up various
2061 	 * bits in the control registers based on their contents.
2062 	 */
2063 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2064 	if (pn != NULL) {
2065 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2066 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2067 	} else {
2068 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2069 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2070 			goto out;
2071 		}
2072 	}
2073 
2074 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2075 	if (pn != NULL) {
2076 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2077 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2078 	} else {
2079 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2080 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2081 			goto out;
2082 		}
2083 	}
2084 
2085 	/* check for WM_F_WOL */
2086 	switch (sc->sc_type) {
2087 	case WM_T_82542_2_0:
2088 	case WM_T_82542_2_1:
2089 	case WM_T_82543:
2090 		/* dummy? */
2091 		eeprom_data = 0;
2092 		apme_mask = NVM_CFG3_APME;
2093 		break;
2094 	case WM_T_82544:
2095 		apme_mask = NVM_CFG2_82544_APM_EN;
2096 		eeprom_data = cfg2;
2097 		break;
2098 	case WM_T_82546:
2099 	case WM_T_82546_3:
2100 	case WM_T_82571:
2101 	case WM_T_82572:
2102 	case WM_T_82573:
2103 	case WM_T_82574:
2104 	case WM_T_82583:
2105 	case WM_T_80003:
2106 	default:
2107 		apme_mask = NVM_CFG3_APME;
2108 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2109 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2110 		break;
2111 	case WM_T_82575:
2112 	case WM_T_82576:
2113 	case WM_T_82580:
2114 	case WM_T_I350:
2115 	case WM_T_I354: /* XXX ok? */
2116 	case WM_T_ICH8:
2117 	case WM_T_ICH9:
2118 	case WM_T_ICH10:
2119 	case WM_T_PCH:
2120 	case WM_T_PCH2:
2121 	case WM_T_PCH_LPT:
2122 		/* XXX The funcid should be checked on some devices */
2123 		apme_mask = WUC_APME;
2124 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2125 		break;
2126 	}
2127 
2128 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2129 	if ((eeprom_data & apme_mask) != 0)
2130 		sc->sc_flags |= WM_F_WOL;
2131 #ifdef WM_DEBUG
2132 	if ((sc->sc_flags & WM_F_WOL) != 0)
2133 		printf("WOL\n");
2134 #endif
2135 
2136 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2137 		/* Check NVM for autonegotiation */
2138 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2139 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2140 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2141 		}
2142 	}
2143 
2144 	/*
2145 	 * XXX need special handling for some multiple-port cards
2146 	 * to disable a particular port.
2147 	 */
2148 
2149 	if (sc->sc_type >= WM_T_82544) {
2150 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2151 		if (pn != NULL) {
2152 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2153 			swdpin = (uint16_t) prop_number_integer_value(pn);
2154 		} else {
2155 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2156 				aprint_error_dev(sc->sc_dev,
2157 				    "unable to read SWDPIN\n");
2158 				goto out;
2159 			}
2160 		}
2161 	}
2162 
2163 	if (cfg1 & NVM_CFG1_ILOS)
2164 		sc->sc_ctrl |= CTRL_ILOS;
2165 
2166 	/*
2167 	 * XXX
2168 	 * This code isn't correct because pins 2 and 3 are located
2169 	 * at different positions on newer chips. Check all datasheets.
2170 	 *
2171 	 * Until this problem is resolved, apply this only up to the 82580.
2172 	 */
2173 	if (sc->sc_type <= WM_T_82580) {
2174 		if (sc->sc_type >= WM_T_82544) {
2175 			sc->sc_ctrl |=
2176 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2177 			    CTRL_SWDPIO_SHIFT;
2178 			sc->sc_ctrl |=
2179 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2180 			    CTRL_SWDPINS_SHIFT;
2181 		} else {
2182 			sc->sc_ctrl |=
2183 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2184 			    CTRL_SWDPIO_SHIFT;
2185 		}
2186 	}
2187 
2188 	/* XXX For other than 82580? */
2189 	if (sc->sc_type == WM_T_82580) {
2190 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2191 		if (nvmword & __BIT(13))
2192 			sc->sc_ctrl |= CTRL_ILOS;
2193 	}
2194 
2195 #if 0
2196 	if (sc->sc_type >= WM_T_82544) {
2197 		if (cfg1 & NVM_CFG1_IPS0)
2198 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2199 		if (cfg1 & NVM_CFG1_IPS1)
2200 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2201 		sc->sc_ctrl_ext |=
2202 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2203 		    CTRL_EXT_SWDPIO_SHIFT;
2204 		sc->sc_ctrl_ext |=
2205 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2206 		    CTRL_EXT_SWDPINS_SHIFT;
2207 	} else {
2208 		sc->sc_ctrl_ext |=
2209 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2210 		    CTRL_EXT_SWDPIO_SHIFT;
2211 	}
2212 #endif
2213 
2214 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2215 #if 0
2216 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2217 #endif
2218 
2219 	if (sc->sc_type == WM_T_PCH) {
2220 		uint16_t val;
2221 
2222 		/* Save the NVM K1 bit setting */
2223 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2224 
2225 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2226 			sc->sc_nvm_k1_enabled = 1;
2227 		else
2228 			sc->sc_nvm_k1_enabled = 0;
2229 	}
2230 
2231 	/*
2232 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize the
2233 	 * media structures accordingly.
2234 	 */
2235 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2236 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2237 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2238 	    || sc->sc_type == WM_T_82573
2239 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2240 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2241 		wm_gmii_mediainit(sc, wmp->wmp_product);
2242 	} else if (sc->sc_type < WM_T_82543 ||
2243 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2244 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2245 			aprint_error_dev(sc->sc_dev,
2246 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2247 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2248 		}
2249 		wm_tbi_mediainit(sc);
2250 	} else {
2251 		switch (sc->sc_type) {
2252 		case WM_T_82575:
2253 		case WM_T_82576:
2254 		case WM_T_82580:
2255 		case WM_T_I350:
2256 		case WM_T_I354:
2257 		case WM_T_I210:
2258 		case WM_T_I211:
2259 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2260 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2261 			switch (link_mode) {
2262 			case CTRL_EXT_LINK_MODE_1000KX:
2263 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2264 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2265 				break;
2266 			case CTRL_EXT_LINK_MODE_SGMII:
2267 				if (wm_sgmii_uses_mdio(sc)) {
2268 					aprint_verbose_dev(sc->sc_dev,
2269 					    "SGMII(MDIO)\n");
2270 					sc->sc_flags |= WM_F_SGMII;
2271 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2272 					break;
2273 				}
2274 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2275 				/*FALLTHROUGH*/
2276 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2277 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2278 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2279 					if (link_mode
2280 					    == CTRL_EXT_LINK_MODE_SGMII) {
2281 						sc->sc_mediatype
2282 						    = WM_MEDIATYPE_COPPER;
2283 						sc->sc_flags |= WM_F_SGMII;
2284 					} else {
2285 						sc->sc_mediatype
2286 						    = WM_MEDIATYPE_SERDES;
2287 						aprint_verbose_dev(sc->sc_dev,
2288 						    "SERDES\n");
2289 					}
2290 					break;
2291 				}
2292 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2293 					aprint_verbose_dev(sc->sc_dev,
2294 					    "SERDES\n");
2295 
2296 				/* Change current link mode setting */
2297 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2298 				switch (sc->sc_mediatype) {
2299 				case WM_MEDIATYPE_COPPER:
2300 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2301 					break;
2302 				case WM_MEDIATYPE_SERDES:
2303 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2304 					break;
2305 				default:
2306 					break;
2307 				}
2308 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2309 				break;
2310 			case CTRL_EXT_LINK_MODE_GMII:
2311 			default:
2312 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2313 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2314 				break;
2315 			}
2316 
2318 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2319 				reg |= CTRL_EXT_I2C_ENA;
2320 			else
2321 				reg &= ~CTRL_EXT_I2C_ENA;
2322 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2323 
2324 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2325 				wm_gmii_mediainit(sc, wmp->wmp_product);
2326 			else
2327 				wm_tbi_mediainit(sc);
2328 			break;
2329 		default:
2330 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2331 				aprint_error_dev(sc->sc_dev,
2332 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2333 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2334 			wm_gmii_mediainit(sc, wmp->wmp_product);
2335 		}
2336 	}
2337 
2338 	ifp = &sc->sc_ethercom.ec_if;
2339 	xname = device_xname(sc->sc_dev);
2340 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2341 	ifp->if_softc = sc;
2342 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2343 	ifp->if_ioctl = wm_ioctl;
2344 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
2345 		ifp->if_start = wm_nq_start;
2346 	else
2347 		ifp->if_start = wm_start;
2348 	ifp->if_watchdog = wm_watchdog;
2349 	ifp->if_init = wm_init;
2350 	ifp->if_stop = wm_stop;
2351 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2352 	IFQ_SET_READY(&ifp->if_snd);
2353 
2354 	/* Check for jumbo frame */
2355 	switch (sc->sc_type) {
2356 	case WM_T_82573:
2357 		/* XXX limited to 9234 if ASPM is disabled */
2358 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2359 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2360 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2361 		break;
2362 	case WM_T_82571:
2363 	case WM_T_82572:
2364 	case WM_T_82574:
2365 	case WM_T_82575:
2366 	case WM_T_82576:
2367 	case WM_T_82580:
2368 	case WM_T_I350:
2369 	case WM_T_I354: /* XXXX ok? */
2370 	case WM_T_I210:
2371 	case WM_T_I211:
2372 	case WM_T_80003:
2373 	case WM_T_ICH9:
2374 	case WM_T_ICH10:
2375 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2376 	case WM_T_PCH_LPT:
2377 		/* XXX limited to 9234 */
2378 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2379 		break;
2380 	case WM_T_PCH:
2381 		/* XXX limited to 4096 */
2382 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2383 		break;
2384 	case WM_T_82542_2_0:
2385 	case WM_T_82542_2_1:
2386 	case WM_T_82583:
2387 	case WM_T_ICH8:
2388 		/* No support for jumbo frame */
2389 		break;
2390 	default:
2391 		/* ETHER_MAX_LEN_JUMBO */
2392 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2393 		break;
2394 	}
2395 
2396 	/* If we're an i82543 or greater, we can support VLANs. */
2397 	if (sc->sc_type >= WM_T_82543)
2398 		sc->sc_ethercom.ec_capabilities |=
2399 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2400 
2401 	/*
2402 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2403 	 * on i82543 and later.
2404 	 */
2405 	if (sc->sc_type >= WM_T_82543) {
2406 		ifp->if_capabilities |=
2407 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2408 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2409 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2410 		    IFCAP_CSUM_TCPv6_Tx |
2411 		    IFCAP_CSUM_UDPv6_Tx;
2412 	}
2413 
2414 	/*
2415 	 * XXXyamt: I'm not sure which chips support RXCSUM_IPV6OFL.
2416 	 *
2417 	 *	82541GI (8086:1076) ... no
2418 	 *	82572EI (8086:10b9) ... yes
2419 	 */
2420 	if (sc->sc_type >= WM_T_82571) {
2421 		ifp->if_capabilities |=
2422 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2423 	}
2424 
2425 	/*
2426 	 * If we're an i82544 or greater (except i82547), we can do
2427 	 * TCP segmentation offload.
2428 	 */
2429 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2430 		ifp->if_capabilities |= IFCAP_TSOv4;
2431 	}
2432 
2433 	if (sc->sc_type >= WM_T_82571) {
2434 		ifp->if_capabilities |= IFCAP_TSOv6;
2435 	}
2436 
2437 #ifdef WM_MPSAFE
2438 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2439 #else
2440 	sc->sc_core_lock = NULL;
2441 #endif
2442 
2443 	/* Attach the interface. */
2444 	if_attach(ifp);
2445 	ether_ifattach(ifp, enaddr);
2446 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2447 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2448 			  RND_FLAG_DEFAULT);
2449 
2450 #ifdef WM_EVENT_COUNTERS
2451 	/* Attach event counters. */
2452 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2453 	    NULL, xname, "txsstall");
2454 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2455 	    NULL, xname, "txdstall");
2456 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2457 	    NULL, xname, "txfifo_stall");
2458 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2459 	    NULL, xname, "txdw");
2460 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2461 	    NULL, xname, "txqe");
2462 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2463 	    NULL, xname, "rxintr");
2464 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2465 	    NULL, xname, "linkintr");
2466 
2467 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2468 	    NULL, xname, "rxipsum");
2469 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2470 	    NULL, xname, "rxtusum");
2471 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2472 	    NULL, xname, "txipsum");
2473 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2474 	    NULL, xname, "txtusum");
2475 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2476 	    NULL, xname, "txtusum6");
2477 
2478 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2479 	    NULL, xname, "txtso");
2480 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2481 	    NULL, xname, "txtso6");
2482 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2483 	    NULL, xname, "txtsopain");
2484 
2485 	for (i = 0; i < WM_NTXSEGS; i++) {
2486 		snprintf(wm_txseg_evcnt_names[i],
2487 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2488 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2489 		    NULL, xname, wm_txseg_evcnt_names[i]);
2490 	}
2491 
2492 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2493 	    NULL, xname, "txdrop");
2494 
2495 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2496 	    NULL, xname, "tu");
2497 
2498 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2499 	    NULL, xname, "tx_xoff");
2500 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2501 	    NULL, xname, "tx_xon");
2502 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2503 	    NULL, xname, "rx_xoff");
2504 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2505 	    NULL, xname, "rx_xon");
2506 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2507 	    NULL, xname, "rx_macctl");
2508 #endif /* WM_EVENT_COUNTERS */
2509 
2510 	if (pmf_device_register(self, wm_suspend, wm_resume))
2511 		pmf_class_network_register(self, ifp);
2512 	else
2513 		aprint_error_dev(self, "couldn't establish power handler\n");
2514 
2515 	sc->sc_flags |= WM_F_ATTACHED;
2516  out:
2517 	return;
2518 }
2519 
2520 /* The detach function (ca_detach) */
2521 static int
2522 wm_detach(device_t self, int flags __unused)
2523 {
2524 	struct wm_softc *sc = device_private(self);
2525 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2526 	int i;
2527 #ifndef WM_MPSAFE
2528 	int s;
2529 #endif
2530 
2531 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2532 		return 0;
2533 
2534 #ifndef WM_MPSAFE
2535 	s = splnet();
2536 #endif
2537 	/* Stop the interface; callouts are stopped inside it. */
2538 	wm_stop(ifp, 1);
2539 
2540 #ifndef WM_MPSAFE
2541 	splx(s);
2542 #endif
2543 
2544 	pmf_device_deregister(self);
2545 
2546 	/* Tell the firmware about the release */
2547 	WM_CORE_LOCK(sc);
2548 	wm_release_manageability(sc);
2549 	wm_release_hw_control(sc);
2550 	WM_CORE_UNLOCK(sc);
2551 
2552 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2553 
2554 	/* Delete all remaining media. */
2555 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2556 
2557 	ether_ifdetach(ifp);
2558 	if_detach(ifp);
2559 
2561 	/* Unload RX dmamaps and free mbufs */
2562 	for (i = 0; i < sc->sc_nrxqueues; i++) {
2563 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
2564 		WM_RX_LOCK(rxq);
2565 		wm_rxdrain(rxq);
2566 		WM_RX_UNLOCK(rxq);
2567 	}
2568 	/* Must unlock here */
2569 
2570 	wm_free_txrx_queues(sc);
2571 
2572 	/* Disestablish the interrupt handler */
2573 	for (i = 0; i < sc->sc_nintrs; i++) {
2574 		if (sc->sc_ihs[i] != NULL) {
2575 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2576 			sc->sc_ihs[i] = NULL;
2577 		}
2578 	}
2579 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2580 
2581 	/* Unmap the registers */
2582 	if (sc->sc_ss) {
2583 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2584 		sc->sc_ss = 0;
2585 	}
2586 	if (sc->sc_ios) {
2587 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2588 		sc->sc_ios = 0;
2589 	}
2590 	if (sc->sc_flashs) {
2591 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2592 		sc->sc_flashs = 0;
2593 	}
2594 
2595 	if (sc->sc_core_lock)
2596 		mutex_obj_free(sc->sc_core_lock);
2597 
2598 	return 0;
2599 }
2600 
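/* The suspend function (pmf handler): release control to the firmware */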
2601 static bool
2602 wm_suspend(device_t self, const pmf_qual_t *qual)
2603 {
2604 	struct wm_softc *sc = device_private(self);
2605 
2606 	wm_release_manageability(sc);
2607 	wm_release_hw_control(sc);
2608 #ifdef WM_WOL
2609 	wm_enable_wakeup(sc);
2610 #endif
2611 
2612 	return true;
2613 }
2614 
2615 static bool
2616 wm_resume(device_t self, const pmf_qual_t *qual)
2617 {
2618 	struct wm_softc *sc = device_private(self);
2619 
2620 	wm_init_manageability(sc);
2621 
2622 	return true;
2623 }
2624 
2625 /*
2626  * wm_watchdog:		[ifnet interface function]
2627  *
2628  *	Watchdog timer handler.
2629  */
2630 static void
2631 wm_watchdog(struct ifnet *ifp)
2632 {
2633 	struct wm_softc *sc = ifp->if_softc;
2634 	struct wm_txqueue *txq = &sc->sc_txq[0];
2635 
2636 	/*
2637 	 * Since we're using delayed interrupts, sweep up
2638 	 * before we report an error.
2639 	 */
2640 	WM_TX_LOCK(txq);
2641 	wm_txeof(sc);
2642 	WM_TX_UNLOCK(txq);
2643 
2644 	if (txq->txq_free != WM_NTXDESC(txq)) {
2645 #ifdef WM_DEBUG
2646 		int i, j;
2647 		struct wm_txsoft *txs;
2648 #endif
2649 		log(LOG_ERR,
2650 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2651 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2652 		    txq->txq_next);
2653 		ifp->if_oerrors++;
2654 #ifdef WM_DEBUG
2655 		for (i = txq->txq_sdirty; i != txq->txq_snext;
2656 		    i = WM_NEXTTXS(txq, i)) {
2657 			txs = &txq->txq_soft[i];
2658 			printf("txs %d tx %d -> %d\n",
2659 			    i, txs->txs_firstdesc, txs->txs_lastdesc);
2660 			for (j = txs->txs_firstdesc; ;
2661 			    j = WM_NEXTTX(txq, j)) {
2662 				printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2663 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2664 				printf("\t %#08x%08x\n",
2665 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2666 				    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2667 				if (j == txs->txs_lastdesc)
2668 					break;
2669 			}
2670 		}
2671 #endif
2672 		/* Reset the interface. */
2673 		(void) wm_init(ifp);
2674 	}
2675 
2676 	/* Try to get more packets going. */
2677 	ifp->if_start(ifp);
2678 }
2679 
2680 /*
2681  * wm_tick:
2682  *
2683  *	One second timer, used to check link status, sweep up
2684  *	completed transmit jobs, etc.
2685  */
2686 static void
2687 wm_tick(void *arg)
2688 {
2689 	struct wm_softc *sc = arg;
2690 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2691 #ifndef WM_MPSAFE
2692 	int s;
2693 
2694 	s = splnet();
2695 #endif
2696 
2697 	WM_CORE_LOCK(sc);
2698 
2699 	if (sc->sc_stopping)
2700 		goto out;
2701 
2702 	if (sc->sc_type >= WM_T_82542_2_1) {
2703 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2704 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2705 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2706 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2707 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2708 	}
2709 
2710 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2711 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2712 	    + CSR_READ(sc, WMREG_CRCERRS)
2713 	    + CSR_READ(sc, WMREG_ALGNERRC)
2714 	    + CSR_READ(sc, WMREG_SYMERRC)
2715 	    + CSR_READ(sc, WMREG_RXERRC)
2716 	    + CSR_READ(sc, WMREG_SEC)
2717 	    + CSR_READ(sc, WMREG_CEXTERR)
2718 	    + CSR_READ(sc, WMREG_RLEC);
2719 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2720 
2721 	if (sc->sc_flags & WM_F_HAS_MII)
2722 		mii_tick(&sc->sc_mii);
2723 	else if ((sc->sc_type >= WM_T_82575)
2724 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2725 		wm_serdes_tick(sc);
2726 	else
2727 		wm_tbi_tick(sc);
2728 
2729 out:
2730 	WM_CORE_UNLOCK(sc);
2731 #ifndef WM_MPSAFE
2732 	splx(s);
2733 #endif
2734 
2735 	if (!sc->sc_stopping)
2736 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2737 }
2738 
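/*
 * wm_ifflags_cb:
 *
 *	Callback for interface flag changes: reprogram the RX filter
 *	and VLAN settings, or return ENETRESET to request a full
 *	reinitialization.
 */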
2739 static int
2740 wm_ifflags_cb(struct ethercom *ec)
2741 {
2742 	struct ifnet *ifp = &ec->ec_if;
2743 	struct wm_softc *sc = ifp->if_softc;
2744 	int change = ifp->if_flags ^ sc->sc_if_flags;
2745 	int rc = 0;
2746 
2747 	WM_CORE_LOCK(sc);
2748 
2749 	if (change != 0)
2750 		sc->sc_if_flags = ifp->if_flags;
2751 
2752 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2753 		rc = ENETRESET;
2754 		goto out;
2755 	}
2756 
2757 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2758 		wm_set_filter(sc);
2759 
2760 	wm_set_vlan(sc);
2761 
2762 out:
2763 	WM_CORE_UNLOCK(sc);
2764 
2765 	return rc;
2766 }
2767 
2768 /*
2769  * wm_ioctl:		[ifnet interface function]
2770  *
2771  *	Handle control requests from the operator.
2772  */
2773 static int
2774 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2775 {
2776 	struct wm_softc *sc = ifp->if_softc;
2777 	struct ifreq *ifr = (struct ifreq *) data;
2778 	struct ifaddr *ifa = (struct ifaddr *)data;
2779 	struct sockaddr_dl *sdl;
2780 	int s, error;
2781 
2782 #ifndef WM_MPSAFE
2783 	s = splnet();
2784 #endif
2785 	switch (cmd) {
2786 	case SIOCSIFMEDIA:
2787 	case SIOCGIFMEDIA:
2788 		WM_CORE_LOCK(sc);
2789 		/* Flow control requires full-duplex mode. */
2790 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2791 		    (ifr->ifr_media & IFM_FDX) == 0)
2792 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2793 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2794 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2795 				/* We can do both TXPAUSE and RXPAUSE. */
2796 				ifr->ifr_media |=
2797 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2798 			}
2799 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2800 		}
2801 		WM_CORE_UNLOCK(sc);
2802 #ifdef WM_MPSAFE
2803 		s = splnet();
2804 #endif
2805 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2806 #ifdef WM_MPSAFE
2807 		splx(s);
2808 #endif
2809 		break;
2810 	case SIOCINITIFADDR:
2811 		WM_CORE_LOCK(sc);
2812 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2813 			sdl = satosdl(ifp->if_dl->ifa_addr);
2814 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2815 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2816 			/* unicast address is first multicast entry */
2817 			wm_set_filter(sc);
2818 			error = 0;
2819 			WM_CORE_UNLOCK(sc);
2820 			break;
2821 		}
2822 		WM_CORE_UNLOCK(sc);
2823 		/*FALLTHROUGH*/
2824 	default:
2825 #ifdef WM_MPSAFE
2826 		s = splnet();
2827 #endif
2828 		/* It may call wm_start, so unlock here */
2829 		error = ether_ioctl(ifp, cmd, data);
2830 #ifdef WM_MPSAFE
2831 		splx(s);
2832 #endif
2833 		if (error != ENETRESET)
2834 			break;
2835 
2836 		error = 0;
2837 
2838 		if (cmd == SIOCSIFCAP) {
2839 			error = (*ifp->if_init)(ifp);
2840 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2841 			;
2842 		else if (ifp->if_flags & IFF_RUNNING) {
2843 			/*
2844 			 * Multicast list has changed; set the hardware filter
2845 			 * accordingly.
2846 			 */
2847 			WM_CORE_LOCK(sc);
2848 			wm_set_filter(sc);
2849 			WM_CORE_UNLOCK(sc);
2850 		}
2851 		break;
2852 	}
2853 
2854 #ifndef WM_MPSAFE
2855 	splx(s);
2856 #endif
2857 	return error;
2858 }
2859 
2860 /* MAC address related */
2861 
2862 /*
2863  * Get the offset of the MAC address and return it.
2864  * If an error occurs, use offset 0.
2865  */
2866 static uint16_t
2867 wm_check_alt_mac_addr(struct wm_softc *sc)
2868 {
2869 	uint16_t myea[ETHER_ADDR_LEN / 2];
2870 	uint16_t offset = NVM_OFF_MACADDR;
2871 
2872 	/* Try to read alternative MAC address pointer */
2873 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2874 		return 0;
2875 
2876 	/* Check whether the pointer is valid. */
2877 	if ((offset == 0x0000) || (offset == 0xffff))
2878 		return 0;
2879 
2880 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2881 	/*
2882 	 * Check whether the alternative MAC address is valid. Some
2883 	 * cards have a non-0xffff pointer but don't actually use an
2884 	 * alternative MAC address.
2885 	 *
2886 	 * Do this by checking whether the broadcast bit is set.
2887 	 */
2888 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2889 		if (((myea[0] & 0xff) & 0x01) == 0)
2890 			return offset; /* Found */
2891 
2892 	/* Not found */
2893 	return 0;
2894 }
2895 
2896 static int
2897 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2898 {
2899 	uint16_t myea[ETHER_ADDR_LEN / 2];
2900 	uint16_t offset = NVM_OFF_MACADDR;
2901 	int do_invert = 0;
2902 
2903 	switch (sc->sc_type) {
2904 	case WM_T_82580:
2905 	case WM_T_I350:
2906 	case WM_T_I354:
2907 		/* EEPROM Top Level Partitioning */
2908 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2909 		break;
2910 	case WM_T_82571:
2911 	case WM_T_82575:
2912 	case WM_T_82576:
2913 	case WM_T_80003:
2914 	case WM_T_I210:
2915 	case WM_T_I211:
2916 		offset = wm_check_alt_mac_addr(sc);
2917 		if (offset == 0)
2918 			if ((sc->sc_funcid & 0x01) == 1)
2919 				do_invert = 1;
2920 		break;
2921 	default:
2922 		if ((sc->sc_funcid & 0x01) == 1)
2923 			do_invert = 1;
2924 		break;
2925 	}
2926 
2927 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
2928 		myea) != 0)
2929 		goto bad;
2930 
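	/* Each 16-bit NVM word holds two address octets, low byte first */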
2931 	enaddr[0] = myea[0] & 0xff;
2932 	enaddr[1] = myea[0] >> 8;
2933 	enaddr[2] = myea[1] & 0xff;
2934 	enaddr[3] = myea[1] >> 8;
2935 	enaddr[4] = myea[2] & 0xff;
2936 	enaddr[5] = myea[2] >> 8;
2937 
2938 	/*
2939 	 * Toggle the LSB of the MAC address on the second port
2940 	 * of some dual port cards.
2941 	 */
2942 	if (do_invert != 0)
2943 		enaddr[5] ^= 1;
2944 
2945 	return 0;
2946 
2947  bad:
2948 	return -1;
2949 }
2950 
2951 /*
2952  * wm_set_ral:
2953  *
2954  *	Set an entry in the receive address list.
2955  */
2956 static void
2957 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
2958 {
2959 	uint32_t ral_lo, ral_hi;
2960 
2961 	if (enaddr != NULL) {
2962 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
2963 		    (enaddr[3] << 24);
2964 		ral_hi = enaddr[4] | (enaddr[5] << 8);
2965 		ral_hi |= RAL_AV;
2966 	} else {
2967 		ral_lo = 0;
2968 		ral_hi = 0;
2969 	}
2970 
2971 	if (sc->sc_type >= WM_T_82544) {
2972 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
2973 		    ral_lo);
2974 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
2975 		    ral_hi);
2976 	} else {
2977 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
2978 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
2979 	}
2980 }
2981 
2982 /*
2983  * wm_mchash:
2984  *
2985  *	Compute the hash of the multicast address for the 4096-bit
2986  *	multicast filter.
2987  */
2988 static uint32_t
2989 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
2990 {
2991 	static const int lo_shift[4] = { 4, 3, 2, 0 };
2992 	static const int hi_shift[4] = { 4, 5, 6, 8 };
2993 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
2994 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
2995 	uint32_t hash;
2996 
2997 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
2998 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
2999 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
3000 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3001 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3002 		return (hash & 0x3ff);
3003 	}
3004 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3005 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3006 
3007 	return (hash & 0xfff);
3008 }
3009 
3010 /*
3011  * wm_set_filter:
3012  *
3013  *	Set up the receive filter.
3014  */
3015 static void
3016 wm_set_filter(struct wm_softc *sc)
3017 {
3018 	struct ethercom *ec = &sc->sc_ethercom;
3019 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3020 	struct ether_multi *enm;
3021 	struct ether_multistep step;
3022 	bus_addr_t mta_reg;
3023 	uint32_t hash, reg, bit;
3024 	int i, size, max;
3025 
3026 	if (sc->sc_type >= WM_T_82544)
3027 		mta_reg = WMREG_CORDOVA_MTA;
3028 	else
3029 		mta_reg = WMREG_MTA;
3030 
3031 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3032 
3033 	if (ifp->if_flags & IFF_BROADCAST)
3034 		sc->sc_rctl |= RCTL_BAM;
3035 	if (ifp->if_flags & IFF_PROMISC) {
3036 		sc->sc_rctl |= RCTL_UPE;
3037 		goto allmulti;
3038 	}
3039 
3040 	/*
3041 	 * Set the station address in the first RAL slot, and
3042 	 * clear the remaining slots.
3043 	 */
3044 	if (sc->sc_type == WM_T_ICH8)
3045 		size = WM_RAL_TABSIZE_ICH8 - 1;
3046 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3047 	    || (sc->sc_type == WM_T_PCH))
3048 		size = WM_RAL_TABSIZE_ICH8;
3049 	else if (sc->sc_type == WM_T_PCH2)
3050 		size = WM_RAL_TABSIZE_PCH2;
3051 	else if (sc->sc_type == WM_T_PCH_LPT)
3052 		size = WM_RAL_TABSIZE_PCH_LPT;
3053 	else if (sc->sc_type == WM_T_82575)
3054 		size = WM_RAL_TABSIZE_82575;
3055 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3056 		size = WM_RAL_TABSIZE_82576;
3057 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3058 		size = WM_RAL_TABSIZE_I350;
3059 	else
3060 		size = WM_RAL_TABSIZE;
3061 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3062 
3063 	if (sc->sc_type == WM_T_PCH_LPT) {
3064 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3065 		switch (i) {
3066 		case 0:
3067 			/* We can use all entries */
3068 			max = size;
3069 			break;
3070 		case 1:
3071 			/* Only RAR[0] */
3072 			max = 1;
3073 			break;
3074 		default:
3075 			/* available SHRA + RAR[0] */
3076 			max = i + 1;
3077 		}
3078 	} else
3079 		max = size;
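	/* Clear the rest, skipping any entries locked by FWSM_WLOCK_MAC */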
3080 	for (i = 1; i < size; i++) {
3081 		if (i < max)
3082 			wm_set_ral(sc, NULL, i);
3083 	}
3084 
3085 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3086 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3087 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
3088 		size = WM_ICH8_MC_TABSIZE;
3089 	else
3090 		size = WM_MC_TABSIZE;
3091 	/* Clear out the multicast table. */
3092 	for (i = 0; i < size; i++)
3093 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3094 
3095 	ETHER_FIRST_MULTI(step, ec, enm);
3096 	while (enm != NULL) {
3097 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3098 			/*
3099 			 * We must listen to a range of multicast addresses.
3100 			 * For now, just accept all multicasts, rather than
3101 			 * trying to set only those filter bits needed to match
3102 			 * the range.  (At this time, the only use of address
3103 			 * ranges is for IP multicast routing, for which the
3104 			 * range is big enough to require all bits set.)
3105 			 */
3106 			goto allmulti;
3107 		}
3108 
3109 		hash = wm_mchash(sc, enm->enm_addrlo);
3110 
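		/* The upper hash bits select the MTA word; the low 5 the bit */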
3111 		reg = (hash >> 5);
3112 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3113 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3114 		    || (sc->sc_type == WM_T_PCH2)
3115 		    || (sc->sc_type == WM_T_PCH_LPT))
3116 			reg &= 0x1f;
3117 		else
3118 			reg &= 0x7f;
3119 		bit = hash & 0x1f;
3120 
3121 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3122 		hash |= 1U << bit;
3123 
3124 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3125 			/*
3126 			 * 82544 Errata 9: Certain registers cannot be written
3127 			 * with particular alignments in PCI-X bus operation
3128 			 * (FCAH, MTA and VFTA).
3129 			 */
3130 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3131 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3132 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3133 		} else
3134 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3135 
3136 		ETHER_NEXT_MULTI(step, enm);
3137 	}
3138 
3139 	ifp->if_flags &= ~IFF_ALLMULTI;
3140 	goto setit;
3141 
3142  allmulti:
3143 	ifp->if_flags |= IFF_ALLMULTI;
3144 	sc->sc_rctl |= RCTL_MPE;
3145 
3146  setit:
3147 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3148 }
3149 
3150 /* Reset and init related */
3151 
3152 static void
3153 wm_set_vlan(struct wm_softc *sc)
3154 {
3155 	/* Deal with VLAN enables. */
3156 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3157 		sc->sc_ctrl |= CTRL_VME;
3158 	else
3159 		sc->sc_ctrl &= ~CTRL_VME;
3160 
3161 	/* Write the control registers. */
3162 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3163 }
3164 
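/*
 * wm_set_pcie_completion_timeout:
 *
 *	If the PCIe completion timeout is defaulted to 0, set it to
 *	10ms (or 16ms via DCSR2 on capability version 2 devices), and
 *	disable the completion timeout resend.
 */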
3165 static void
3166 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3167 {
3168 	uint32_t gcr;
3169 	pcireg_t ctrl2;
3170 
3171 	gcr = CSR_READ(sc, WMREG_GCR);
3172 
3173 	/* Only take action if timeout value is defaulted to 0 */
3174 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3175 		goto out;
3176 
3177 	if ((gcr & GCR_CAP_VER2) == 0) {
3178 		gcr |= GCR_CMPL_TMOUT_10MS;
3179 		goto out;
3180 	}
3181 
3182 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3183 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3184 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3185 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3186 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3187 
3188 out:
3189 	/* Disable completion timeout resend */
3190 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3191 
3192 	CSR_WRITE(sc, WMREG_GCR, gcr);
3193 }
3194 
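/*
 * wm_get_auto_rd_done:
 *
 *	Wait up to 10ms for the EEPROM auto-read to complete after a
 *	reset, as signalled by EECD_EE_AUTORD.
 */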
3195 void
3196 wm_get_auto_rd_done(struct wm_softc *sc)
3197 {
3198 	int i;
3199 
3200 	/* wait for eeprom to reload */
3201 	switch (sc->sc_type) {
3202 	case WM_T_82571:
3203 	case WM_T_82572:
3204 	case WM_T_82573:
3205 	case WM_T_82574:
3206 	case WM_T_82583:
3207 	case WM_T_82575:
3208 	case WM_T_82576:
3209 	case WM_T_82580:
3210 	case WM_T_I350:
3211 	case WM_T_I354:
3212 	case WM_T_I210:
3213 	case WM_T_I211:
3214 	case WM_T_80003:
3215 	case WM_T_ICH8:
3216 	case WM_T_ICH9:
3217 		for (i = 0; i < 10; i++) {
3218 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3219 				break;
3220 			delay(1000);
3221 		}
3222 		if (i == 10) {
3223 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3224 			    "complete\n", device_xname(sc->sc_dev));
3225 		}
3226 		break;
3227 	default:
3228 		break;
3229 	}
3230 }
3231 
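/*
 * wm_lan_init_done:
 *
 *	On ICH10 and PCH variants, wait for STATUS_LAN_INIT_DONE to be
 *	set, then clear it so the next wait starts from a clean state.
 */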
3232 void
3233 wm_lan_init_done(struct wm_softc *sc)
3234 {
3235 	uint32_t reg = 0;
3236 	int i;
3237 
3238 	/* wait for eeprom to reload */
3239 	switch (sc->sc_type) {
3240 	case WM_T_ICH10:
3241 	case WM_T_PCH:
3242 	case WM_T_PCH2:
3243 	case WM_T_PCH_LPT:
3244 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3245 			reg = CSR_READ(sc, WMREG_STATUS);
3246 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3247 				break;
3248 			delay(100);
3249 		}
3250 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3251 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3252 			    "complete\n", device_xname(sc->sc_dev), __func__);
3253 		}
3254 		break;
3255 	default:
3256 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3257 		    __func__);
3258 		break;
3259 	}
3260 
3261 	reg &= ~STATUS_LAN_INIT_DONE;
3262 	CSR_WRITE(sc, WMREG_STATUS, reg);
3263 }
3264 
3265 void
3266 wm_get_cfg_done(struct wm_softc *sc)
3267 {
3268 	int mask;
3269 	uint32_t reg;
3270 	int i;
3271 
3272 	/* wait for eeprom to reload */
3273 	switch (sc->sc_type) {
3274 	case WM_T_82542_2_0:
3275 	case WM_T_82542_2_1:
3276 		/* null */
3277 		break;
3278 	case WM_T_82543:
3279 	case WM_T_82544:
3280 	case WM_T_82540:
3281 	case WM_T_82545:
3282 	case WM_T_82545_3:
3283 	case WM_T_82546:
3284 	case WM_T_82546_3:
3285 	case WM_T_82541:
3286 	case WM_T_82541_2:
3287 	case WM_T_82547:
3288 	case WM_T_82547_2:
3289 	case WM_T_82573:
3290 	case WM_T_82574:
3291 	case WM_T_82583:
3292 		/* generic */
3293 		delay(10*1000);
3294 		break;
3295 	case WM_T_80003:
3296 	case WM_T_82571:
3297 	case WM_T_82572:
3298 	case WM_T_82575:
3299 	case WM_T_82576:
3300 	case WM_T_82580:
3301 	case WM_T_I350:
3302 	case WM_T_I354:
3303 	case WM_T_I210:
3304 	case WM_T_I211:
3305 		if (sc->sc_type == WM_T_82571) {
3306 			/* Only 82571 shares port 0 */
3307 			mask = EEMNGCTL_CFGDONE_0;
3308 		} else
3309 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3310 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3311 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3312 				break;
3313 			delay(1000);
3314 		}
3315 		if (i >= WM_PHY_CFG_TIMEOUT) {
3316 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3317 				device_xname(sc->sc_dev), __func__));
3318 		}
3319 		break;
3320 	case WM_T_ICH8:
3321 	case WM_T_ICH9:
3322 	case WM_T_ICH10:
3323 	case WM_T_PCH:
3324 	case WM_T_PCH2:
3325 	case WM_T_PCH_LPT:
3326 		delay(10*1000);
3327 		if (sc->sc_type >= WM_T_ICH10)
3328 			wm_lan_init_done(sc);
3329 		else
3330 			wm_get_auto_rd_done(sc);
3331 
3332 		reg = CSR_READ(sc, WMREG_STATUS);
3333 		if ((reg & STATUS_PHYRA) != 0)
3334 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3335 		break;
3336 	default:
3337 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3338 		    __func__);
3339 		break;
3340 	}
3341 }
3342 
3343 /* Init hardware bits */
3344 void
3345 wm_initialize_hardware_bits(struct wm_softc *sc)
3346 {
3347 	uint32_t tarc0, tarc1, reg;
3348 
3349 	/* For 82571 variant, 80003 and ICHs */
3350 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3351 	    || (sc->sc_type >= WM_T_80003)) {
3352 
3353 		/* Transmit Descriptor Control 0 */
3354 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3355 		reg |= TXDCTL_COUNT_DESC;
3356 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3357 
3358 		/* Transmit Descriptor Control 1 */
3359 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3360 		reg |= TXDCTL_COUNT_DESC;
3361 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3362 
3363 		/* TARC0 */
3364 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3365 		switch (sc->sc_type) {
3366 		case WM_T_82571:
3367 		case WM_T_82572:
3368 		case WM_T_82573:
3369 		case WM_T_82574:
3370 		case WM_T_82583:
3371 		case WM_T_80003:
3372 			/* Clear bits 30..27 */
3373 			tarc0 &= ~__BITS(30, 27);
3374 			break;
3375 		default:
3376 			break;
3377 		}
3378 
3379 		switch (sc->sc_type) {
3380 		case WM_T_82571:
3381 		case WM_T_82572:
3382 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3383 
3384 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3385 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3386 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3387 			/* 8257[12] Errata No.7 */
3388 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3389 
3390 			/* TARC1 bit 28 */
3391 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3392 				tarc1 &= ~__BIT(28);
3393 			else
3394 				tarc1 |= __BIT(28);
3395 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3396 
3397 			/*
3398 			 * 8257[12] Errata No.13
3399 			 * Disable Dynamic Clock Gating.
3400 			 */
3401 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3402 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3403 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3404 			break;
3405 		case WM_T_82573:
3406 		case WM_T_82574:
3407 		case WM_T_82583:
3408 			if ((sc->sc_type == WM_T_82574)
3409 			    || (sc->sc_type == WM_T_82583))
3410 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3411 
3412 			/* Extended Device Control */
3413 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3414 			reg &= ~__BIT(23);	/* Clear bit 23 */
3415 			reg |= __BIT(22);	/* Set bit 22 */
3416 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3417 
3418 			/* Device Control */
3419 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3420 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3421 
3422 			/* PCIe Control Register */
3423 			/*
3424 			 * 82573 Errata (unknown).
3425 			 *
3426 			 * 82574 Errata 25 and 82583 Errata 12
3427 			 * "Dropped Rx Packets":
3428 			 *   NVM Image Version 2.1.4 and newer do not have this bug.
3429 			 */
3430 			reg = CSR_READ(sc, WMREG_GCR);
3431 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3432 			CSR_WRITE(sc, WMREG_GCR, reg);
3433 
3434 			if ((sc->sc_type == WM_T_82574)
3435 			    || (sc->sc_type == WM_T_82583)) {
3436 				/*
3437 				 * Document says this bit must be set for
3438 				 * proper operation.
3439 				 */
3440 				reg = CSR_READ(sc, WMREG_GCR);
3441 				reg |= __BIT(22);
3442 				CSR_WRITE(sc, WMREG_GCR, reg);
3443 
3444 				/*
3445 				 * Apply a workaround for the hardware errata
3446 				 * documented in the errata docs.  It fixes an
3447 				 * issue where some error-prone or unreliable
3448 				 * PCIe completions occur, particularly with
3449 				 * ASPM enabled.  Without the fix, the issue
3450 				 * can cause Tx timeouts.
3451 				 */
3452 				reg = CSR_READ(sc, WMREG_GCR2);
3453 				reg |= __BIT(0);
3454 				CSR_WRITE(sc, WMREG_GCR2, reg);
3455 			}
3456 			break;
3457 		case WM_T_80003:
3458 			/* TARC0 */
3459 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3460 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3461 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3462 
3463 			/* TARC1 bit 28 */
3464 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3465 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3466 				tarc1 &= ~__BIT(28);
3467 			else
3468 				tarc1 |= __BIT(28);
3469 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3470 			break;
3471 		case WM_T_ICH8:
3472 		case WM_T_ICH9:
3473 		case WM_T_ICH10:
3474 		case WM_T_PCH:
3475 		case WM_T_PCH2:
3476 		case WM_T_PCH_LPT:
3477 			/* TARC 0 */
3478 			if (sc->sc_type == WM_T_ICH8) {
3479 				/* Set TARC0 bits 29 and 28 */
3480 				tarc0 |= __BITS(29, 28);
3481 			}
3482 			/* Set TARC0 bits 23,24,26,27 */
3483 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3484 
3485 			/* CTRL_EXT */
3486 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3487 			reg |= __BIT(22);	/* Set bit 22 */
3488 			/*
3489 			 * Enable PHY low-power state when MAC is at D3
3490 			 * w/o WoL
3491 			 */
3492 			if (sc->sc_type >= WM_T_PCH)
3493 				reg |= CTRL_EXT_PHYPDEN;
3494 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3495 
3496 			/* TARC1 */
3497 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3498 			/* bit 28 */
3499 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3500 				tarc1 &= ~__BIT(28);
3501 			else
3502 				tarc1 |= __BIT(28);
3503 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3504 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3505 
3506 			/* Device Status */
3507 			if (sc->sc_type == WM_T_ICH8) {
3508 				reg = CSR_READ(sc, WMREG_STATUS);
3509 				reg &= ~__BIT(31);
3510 				CSR_WRITE(sc, WMREG_STATUS, reg);
3511 
3512 			}
3513 
3514 			/*
3515 			 * To work around a descriptor data corruption issue
3516 			 * during NFS v2 UDP traffic, just disable the NFS
3517 			 * filtering capability.
3518 			 */
3519 			reg = CSR_READ(sc, WMREG_RFCTL);
3520 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3521 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3522 			break;
3523 		default:
3524 			break;
3525 		}
3526 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3527 
3528 		/*
3529 		 * 8257[12] Errata No.52 and some others.
3530 		 * Avoid RSS Hash Value bug.
3531 		 */
3532 		switch (sc->sc_type) {
3533 		case WM_T_82571:
3534 		case WM_T_82572:
3535 		case WM_T_82573:
3536 		case WM_T_80003:
3537 		case WM_T_ICH8:
3538 			reg = CSR_READ(sc, WMREG_RFCTL);
3539 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3540 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3541 			break;
3542 		default:
3543 			break;
3544 		}
3545 	}
3546 }
3547 
3548 static uint32_t
3549 wm_rxpbs_adjust_82580(uint32_t val)
3550 {
3551 	uint32_t rv = 0;
3552 
3553 	if (val < __arraycount(wm_82580_rxpbs_table))
3554 		rv = wm_82580_rxpbs_table[val];
3555 
3556 	return rv;
3557 }
3558 
3559 /*
3560  * wm_reset:
3561  *
3562  *	Reset the chip to a known state.
3563  */
3564 static void
3565 wm_reset(struct wm_softc *sc)
3566 {
3567 	int phy_reset = 0;
3568 	int i, error = 0;
3569 	uint32_t reg, mask;
3570 
3571 	/*
3572 	 * Allocate on-chip memory according to the MTU size.
3573 	 * The Packet Buffer Allocation register must be written
3574 	 * before the chip is reset.
3575 	 */
3576 	switch (sc->sc_type) {
3577 	case WM_T_82547:
3578 	case WM_T_82547_2:
3579 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3580 		    PBA_22K : PBA_30K;
3581 		for (i = 0; i < sc->sc_ntxqueues; i++) {
3582 			struct wm_txqueue *txq = &sc->sc_txq[i];
3583 			txq->txq_fifo_head = 0;
3584 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3585 			txq->txq_fifo_size =
3586 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3587 			txq->txq_fifo_stall = 0;
3588 		}
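		/*
		 * Rough arithmetic: PBA_40K is the whole packet buffer, so
		 * a 22K RX allocation (jumbo) leaves 18K of TX FIFO and a
		 * 30K one (standard MTU) leaves 10K; txq_fifo_size is that
		 * remainder converted from blocks to bytes.
		 */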
3589 		break;
3590 	case WM_T_82571:
3591 	case WM_T_82572:
3592 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3593 	case WM_T_80003:
3594 		sc->sc_pba = PBA_32K;
3595 		break;
3596 	case WM_T_82573:
3597 		sc->sc_pba = PBA_12K;
3598 		break;
3599 	case WM_T_82574:
3600 	case WM_T_82583:
3601 		sc->sc_pba = PBA_20K;
3602 		break;
3603 	case WM_T_82576:
3604 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3605 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3606 		break;
3607 	case WM_T_82580:
3608 	case WM_T_I350:
3609 	case WM_T_I354:
3610 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3611 		break;
3612 	case WM_T_I210:
3613 	case WM_T_I211:
3614 		sc->sc_pba = PBA_34K;
3615 		break;
3616 	case WM_T_ICH8:
3617 		/* Workaround for a bit corruption issue in FIFO memory */
3618 		sc->sc_pba = PBA_8K;
3619 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3620 		break;
3621 	case WM_T_ICH9:
3622 	case WM_T_ICH10:
3623 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3624 		    PBA_14K : PBA_10K;
3625 		break;
3626 	case WM_T_PCH:
3627 	case WM_T_PCH2:
3628 	case WM_T_PCH_LPT:
3629 		sc->sc_pba = PBA_26K;
3630 		break;
3631 	default:
3632 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3633 		    PBA_40K : PBA_48K;
3634 		break;
3635 	}
3636 	/*
3637 	 * Only old or non-multiqueue devices have the PBA register
3638 	 * XXX Need special handling for 82575.
3639 	 */
3640 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3641 	    || (sc->sc_type == WM_T_82575))
3642 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3643 
3644 	/* Prevent the PCI-E bus from sticking */
3645 	if (sc->sc_flags & WM_F_PCIE) {
3646 		int timeout = 800;
3647 
3648 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3649 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3650 
3651 		while (timeout--) {
3652 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3653 			    == 0)
3654 				break;
3655 			delay(100);
3656 		}
3657 	}
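	/*
	 * Timing note: the loop above polls every 100us up to 800 times,
	 * so we wait at most ~80ms for in-flight GIO (bus master) requests
	 * to drain before the reset proceeds.
	 */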
3658 
3659 	/* Set the completion timeout for interface */
3660 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3661 	    || (sc->sc_type == WM_T_82580)
3662 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3663 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3664 		wm_set_pcie_completion_timeout(sc);
3665 
3666 	/* Clear interrupt */
3667 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3668 	if (sc->sc_nintrs > 1) {
3669 		if (sc->sc_type != WM_T_82574) {
3670 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3671 			CSR_WRITE(sc, WMREG_EIAC, 0);
3672 		} else {
3673 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3674 		}
3675 	}
3676 
3677 	/* Stop the transmit and receive processes. */
3678 	CSR_WRITE(sc, WMREG_RCTL, 0);
3679 	sc->sc_rctl &= ~RCTL_EN;
3680 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3681 	CSR_WRITE_FLUSH(sc);
3682 
3683 	/* XXX set_tbi_sbp_82543() */
3684 
3685 	delay(10*1000);
3686 
3687 	/* Must acquire the MDIO ownership before MAC reset */
3688 	switch (sc->sc_type) {
3689 	case WM_T_82573:
3690 	case WM_T_82574:
3691 	case WM_T_82583:
3692 		error = wm_get_hw_semaphore_82573(sc);
3693 		break;
3694 	default:
3695 		break;
3696 	}
3697 
3698 	/*
3699 	 * 82541 Errata 29? & 82547 Errata 28?
3700 	 * See also the description about PHY_RST bit in CTRL register
3701 	 * in 8254x_GBe_SDM.pdf.
3702 	 */
3703 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3704 		CSR_WRITE(sc, WMREG_CTRL,
3705 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3706 		CSR_WRITE_FLUSH(sc);
3707 		delay(5000);
3708 	}
3709 
3710 	switch (sc->sc_type) {
3711 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3712 	case WM_T_82541:
3713 	case WM_T_82541_2:
3714 	case WM_T_82547:
3715 	case WM_T_82547_2:
3716 		/*
3717 		 * On some chipsets, a reset through a memory-mapped write
3718 		 * cycle can cause the chip to reset before completing the
3719 		 * write cycle.  This causes a major headache that can be
3720 		 * avoided by issuing the reset via indirect register writes
3721 		 * through I/O space.
3722 		 *
3723 		 * So, if we successfully mapped the I/O BAR at attach time,
3724 		 * use that.  Otherwise, try our luck with a memory-mapped
3725 		 * reset.
3726 		 */
3727 		if (sc->sc_flags & WM_F_IOH_VALID)
3728 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3729 		else
3730 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3731 		break;
3732 	case WM_T_82545_3:
3733 	case WM_T_82546_3:
3734 		/* Use the shadow control register on these chips. */
3735 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3736 		break;
3737 	case WM_T_80003:
3738 		mask = swfwphysem[sc->sc_funcid];
3739 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3740 		wm_get_swfw_semaphore(sc, mask);
3741 		CSR_WRITE(sc, WMREG_CTRL, reg);
3742 		wm_put_swfw_semaphore(sc, mask);
3743 		break;
3744 	case WM_T_ICH8:
3745 	case WM_T_ICH9:
3746 	case WM_T_ICH10:
3747 	case WM_T_PCH:
3748 	case WM_T_PCH2:
3749 	case WM_T_PCH_LPT:
3750 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3751 		if (wm_phy_resetisblocked(sc) == false) {
3752 			/*
3753 			 * Gate automatic PHY configuration by hardware on
3754 			 * non-managed 82579
3755 			 */
3756 			if ((sc->sc_type == WM_T_PCH2)
3757 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3758 				== 0))
3759 				wm_gate_hw_phy_config_ich8lan(sc, 1);
3760 
3761 			reg |= CTRL_PHY_RESET;
3762 			phy_reset = 1;
3763 		}
3764 		wm_get_swfwhw_semaphore(sc);
3765 		CSR_WRITE(sc, WMREG_CTRL, reg);
3766 		/* Don't insert a completion barrier when resetting */
3767 		delay(20*1000);
3768 		wm_put_swfwhw_semaphore(sc);
3769 		break;
3770 	case WM_T_82580:
3771 	case WM_T_I350:
3772 	case WM_T_I354:
3773 	case WM_T_I210:
3774 	case WM_T_I211:
3775 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3776 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3777 			CSR_WRITE_FLUSH(sc);
3778 		delay(5000);
3779 		break;
3780 	case WM_T_82542_2_0:
3781 	case WM_T_82542_2_1:
3782 	case WM_T_82543:
3783 	case WM_T_82540:
3784 	case WM_T_82545:
3785 	case WM_T_82546:
3786 	case WM_T_82571:
3787 	case WM_T_82572:
3788 	case WM_T_82573:
3789 	case WM_T_82574:
3790 	case WM_T_82575:
3791 	case WM_T_82576:
3792 	case WM_T_82583:
3793 	default:
3794 		/* Everything else can safely use the documented method. */
3795 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3796 		break;
3797 	}
3798 
3799 	/* Must release the MDIO ownership after MAC reset */
3800 	switch (sc->sc_type) {
3801 	case WM_T_82573:
3802 	case WM_T_82574:
3803 	case WM_T_82583:
3804 		if (error == 0)
3805 			wm_put_hw_semaphore_82573(sc);
3806 		break;
3807 	default:
3808 		break;
3809 	}
3810 
3811 	if (phy_reset != 0)
3812 		wm_get_cfg_done(sc);
3813 
3814 	/* reload EEPROM */
3815 	switch (sc->sc_type) {
3816 	case WM_T_82542_2_0:
3817 	case WM_T_82542_2_1:
3818 	case WM_T_82543:
3819 	case WM_T_82544:
3820 		delay(10);
3821 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3822 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3823 		CSR_WRITE_FLUSH(sc);
3824 		delay(2000);
3825 		break;
3826 	case WM_T_82540:
3827 	case WM_T_82545:
3828 	case WM_T_82545_3:
3829 	case WM_T_82546:
3830 	case WM_T_82546_3:
3831 		delay(5*1000);
3832 		/* XXX Disable HW ARPs on ASF enabled adapters */
3833 		break;
3834 	case WM_T_82541:
3835 	case WM_T_82541_2:
3836 	case WM_T_82547:
3837 	case WM_T_82547_2:
3838 		delay(20000);
3839 		/* XXX Disable HW ARPs on ASF enabled adapters */
3840 		break;
3841 	case WM_T_82571:
3842 	case WM_T_82572:
3843 	case WM_T_82573:
3844 	case WM_T_82574:
3845 	case WM_T_82583:
3846 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3847 			delay(10);
3848 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3849 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3850 			CSR_WRITE_FLUSH(sc);
3851 		}
3852 		/* check EECD_EE_AUTORD */
3853 		wm_get_auto_rd_done(sc);
3854 		/*
3855 		 * PHY configuration from the NVM starts only after
3856 		 * EECD_AUTO_RD is set.
3857 		 */
3858 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3859 		    || (sc->sc_type == WM_T_82583))
3860 			delay(25*1000);
3861 		break;
3862 	case WM_T_82575:
3863 	case WM_T_82576:
3864 	case WM_T_82580:
3865 	case WM_T_I350:
3866 	case WM_T_I354:
3867 	case WM_T_I210:
3868 	case WM_T_I211:
3869 	case WM_T_80003:
3870 		/* check EECD_EE_AUTORD */
3871 		wm_get_auto_rd_done(sc);
3872 		break;
3873 	case WM_T_ICH8:
3874 	case WM_T_ICH9:
3875 	case WM_T_ICH10:
3876 	case WM_T_PCH:
3877 	case WM_T_PCH2:
3878 	case WM_T_PCH_LPT:
3879 		break;
3880 	default:
3881 		panic("%s: unknown type\n", __func__);
3882 	}
3883 
3884 	/* Check whether EEPROM is present or not */
3885 	switch (sc->sc_type) {
3886 	case WM_T_82575:
3887 	case WM_T_82576:
3888 	case WM_T_82580:
3889 	case WM_T_I350:
3890 	case WM_T_I354:
3891 	case WM_T_ICH8:
3892 	case WM_T_ICH9:
3893 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
3894 			/* Not found */
3895 			sc->sc_flags |= WM_F_EEPROM_INVALID;
3896 			if (sc->sc_type == WM_T_82575)
3897 				wm_reset_init_script_82575(sc);
3898 		}
3899 		break;
3900 	default:
3901 		break;
3902 	}
3903 
3904 	if ((sc->sc_type == WM_T_82580)
3905 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
3906 		/* clear global device reset status bit */
3907 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
3908 	}
3909 
3910 	/* Clear any pending interrupt events. */
3911 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3912 	reg = CSR_READ(sc, WMREG_ICR);
3913 	if (sc->sc_nintrs > 1) {
3914 		if (sc->sc_type != WM_T_82574) {
3915 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3916 			CSR_WRITE(sc, WMREG_EIAC, 0);
3917 		} else
3918 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3919 	}
3920 
3921 	/* reload sc_ctrl */
3922 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
3923 
3924 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
3925 		wm_set_eee_i350(sc);
3926 
3927 	/* dummy read from WUC */
3928 	if (sc->sc_type == WM_T_PCH)
3929 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
3930 	/*
3931 	 * For PCH, this write will make sure that any noise will be detected
3932 	 * as a CRC error and be dropped rather than show up as a bad packet
3933 	 * to the DMA engine
3934 	 */
3935 	if (sc->sc_type == WM_T_PCH)
3936 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
3937 
3938 	if (sc->sc_type >= WM_T_82544)
3939 		CSR_WRITE(sc, WMREG_WUC, 0);
3940 
3941 	wm_reset_mdicnfg_82580(sc);
3942 
3943 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
3944 		wm_pll_workaround_i210(sc);
3945 }
3946 
3947 /*
3948  * wm_add_rxbuf:
3949  *
3950  *	Add a receive buffer to the indicated descriptor.
3951  */
3952 static int
3953 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
3954 {
3955 	struct wm_softc *sc = rxq->rxq_sc;
3956 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
3957 	struct mbuf *m;
3958 	int error;
3959 
3960 	KASSERT(WM_RX_LOCKED(rxq));
3961 
3962 	MGETHDR(m, M_DONTWAIT, MT_DATA);
3963 	if (m == NULL)
3964 		return ENOBUFS;
3965 
3966 	MCLGET(m, M_DONTWAIT);
3967 	if ((m->m_flags & M_EXT) == 0) {
3968 		m_freem(m);
3969 		return ENOBUFS;
3970 	}
3971 
3972 	if (rxs->rxs_mbuf != NULL)
3973 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
3974 
3975 	rxs->rxs_mbuf = m;
3976 
3977 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3978 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
3979 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
3980 	if (error) {
3981 		/* XXX XXX XXX */
3982 		aprint_error_dev(sc->sc_dev,
3983 		    "unable to load rx DMA map %d, error = %d\n",
3984 		    idx, error);
3985 		panic("wm_add_rxbuf");
3986 	}
3987 
3988 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
3989 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
3990 
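	/*
	 * On NEWQUEUE (82575 and later) chips the descriptor is handed
	 * back to the hardware only while the receiver is enabled (see
	 * the "set RDT only if RX enabled" handling in wm_init_locked());
	 * older chips initialize it unconditionally.
	 */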
3991 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3992 		if ((sc->sc_rctl & RCTL_EN) != 0)
3993 			wm_init_rxdesc(rxq, idx);
3994 	} else
3995 		wm_init_rxdesc(rxq, idx);
3996 
3997 	return 0;
3998 }
3999 
4000 /*
4001  * wm_rxdrain:
4002  *
4003  *	Drain the receive queue.
4004  */
4005 static void
4006 wm_rxdrain(struct wm_rxqueue *rxq)
4007 {
4008 	struct wm_softc *sc = rxq->rxq_sc;
4009 	struct wm_rxsoft *rxs;
4010 	int i;
4011 
4012 	KASSERT(WM_RX_LOCKED(rxq));
4013 
4014 	for (i = 0; i < WM_NRXDESC; i++) {
4015 		rxs = &rxq->rxq_soft[i];
4016 		if (rxs->rxs_mbuf != NULL) {
4017 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4018 			m_freem(rxs->rxs_mbuf);
4019 			rxs->rxs_mbuf = NULL;
4020 		}
4021 	}
4022 }
4023 
4024 
4025 /*
4026  * XXX copy from FreeBSD's sys/net/rss_config.c
4027  */
4028 /*
4029  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4030  * effectiveness may be limited by algorithm choice and available entropy
4031  * during the boot.
4032  *
4033  * XXXRW: And that we don't randomize it yet!
4034  *
4035  * This is the default Microsoft RSS specification key which is also
4036  * the Chelsio T5 firmware default key.
4037  */
4038 #define RSS_KEYSIZE 40
4039 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4040 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4041 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4042 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4043 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4044 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4045 };
4046 
4047 /*
4048  * Caller must pass an array of size sizeof(wm_rss_key).
4049  *
4050  * XXX
4051  * As if_ixgbe may use this function, it should not be an
4052  * if_wm-specific function.
4053  */
4054 static void
4055 wm_rss_getkey(uint8_t *key)
4056 {
4057 
4058 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4059 }
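/*
 * A hypothetical caller, for illustration only (wm_init_rss() below is
 * the real one):
 *
 *	uint32_t rss_key[RSSRK_NUM_REGS];
 *
 *	wm_rss_getkey((uint8_t *)rss_key);
 */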
4060 
4061 /*
4062  * Setup registers for RSS.
4063  *
4064  * XXX no VMDq support yet
4065  */
4066 static void
4067 wm_init_rss(struct wm_softc *sc)
4068 {
4069 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4070 	int i;
4071 
4072 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4073 
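	/*
	 * Fill the redirection table round-robin: entry i steers hash
	 * bucket i to RX queue (i % sc_nrxqueues).  The entry layout
	 * differs by chip generation, hence the switch below.
	 */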
4074 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4075 		int qid, reta_ent;
4076 
4077 		qid  = i % sc->sc_nrxqueues;
4078 		switch (sc->sc_type) {
4079 		case WM_T_82574:
4080 			reta_ent = __SHIFTIN(qid,
4081 			    RETA_ENT_QINDEX_MASK_82574);
4082 			break;
4083 		case WM_T_82575:
4084 			reta_ent = __SHIFTIN(qid,
4085 			    RETA_ENT_QINDEX1_MASK_82575);
4086 			break;
4087 		default:
4088 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4089 			break;
4090 		}
4091 
4092 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4093 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4094 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4095 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4096 	}
4097 
4098 	wm_rss_getkey((uint8_t *)rss_key);
4099 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4100 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4101 
4102 	if (sc->sc_type == WM_T_82574)
4103 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4104 	else
4105 		mrqc = MRQC_ENABLE_RSS_MQ;
4106 
4107 	/* XXXX
4108 	 * The same as FreeBSD igb.
4109 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4110 	 */
4111 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4112 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4113 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4114 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4115 
4116 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4117 }
4118 
4119 /*
4120  * Adjust the TX and RX queue numbers which the system actually uses.
4121  *
4122  * The numbers are affected by the parameters below.
4123  *     - The number of hardware queues
4124  *     - The number of MSI-X vectors (= "nvectors" argument)
4125  *     - ncpu
4126  */
4127 static void
4128 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4129 {
4130 	int hw_ntxqueues, hw_nrxqueues;
4131 
4132 	if (nvectors < 3) {
4133 		sc->sc_ntxqueues = 1;
4134 		sc->sc_nrxqueues = 1;
4135 		return;
4136 	}
4137 
4138 	switch (sc->sc_type) {
4139 	case WM_T_82572:
4140 		hw_ntxqueues = 2;
4141 		hw_nrxqueues = 2;
4142 		break;
4143 	case WM_T_82574:
4144 		hw_ntxqueues = 2;
4145 		hw_nrxqueues = 2;
4146 		break;
4147 	case WM_T_82575:
4148 		hw_ntxqueues = 4;
4149 		hw_nrxqueues = 4;
4150 		break;
4151 	case WM_T_82576:
4152 		hw_ntxqueues = 16;
4153 		hw_nrxqueues = 16;
4154 		break;
4155 	case WM_T_82580:
4156 	case WM_T_I350:
4157 	case WM_T_I354:
4158 		hw_ntxqueues = 8;
4159 		hw_nrxqueues = 8;
4160 		break;
4161 	case WM_T_I210:
4162 		hw_ntxqueues = 4;
4163 		hw_nrxqueues = 4;
4164 		break;
4165 	case WM_T_I211:
4166 		hw_ntxqueues = 2;
4167 		hw_nrxqueues = 2;
4168 		break;
4169 		/*
4170 		 * As the ethernet controllers below do not support MSI-X,
4171 		 * this driver does not use multiqueue on them.
4172 		 *     - WM_T_80003
4173 		 *     - WM_T_ICH8
4174 		 *     - WM_T_ICH9
4175 		 *     - WM_T_ICH10
4176 		 *     - WM_T_PCH
4177 		 *     - WM_T_PCH2
4178 		 *     - WM_T_PCH_LPT
4179 		 */
4180 	default:
4181 		hw_ntxqueues = 1;
4182 		hw_nrxqueues = 1;
4183 		break;
4184 	}
4185 
4186 	/*
4187 	 * As more queues than MSI-X vectors cannot improve scaling, we limit
4188 	 * the number of queues actually used.
4189 	 *
4190 	 * XXX
4191 	 * Currently, we separate TX queue interrupts and RX queue interrupts.
4192 	 * However, the number of MSI-X vectors of recent controllers (such as
4193 	 * the I354) expects that drivers bundle a TX queue interrupt and an RX
4194 	 * queue interrupt into one interrupt, e.g. FreeBSD's igb handles
4195 	 * interrupts that way.
4196 	 */
4197 	if (nvectors < hw_ntxqueues + hw_nrxqueues + 1) {
4198 		sc->sc_ntxqueues = (nvectors - 1) / 2;
4199 		sc->sc_nrxqueues = (nvectors - 1) / 2;
4200 	} else {
4201 		sc->sc_ntxqueues = hw_ntxqueues;
4202 		sc->sc_nrxqueues = hw_nrxqueues;
4203 	}
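	/*
	 * A worked example (hypothetical numbers): an 82580 has 8 hardware
	 * TX/RX queue pairs, so with nvectors = 5 the branch above yields
	 * (5 - 1) / 2 = 2 TX and 2 RX queues; the clamp below may reduce
	 * that further on a machine with fewer CPUs.
	 */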
4204 
4205 	/*
4206 	 * As more queues than CPUs cannot improve scaling, we limit
4207 	 * the number of queues actually used.
4208 	 */
4209 	if (ncpu < sc->sc_ntxqueues)
4210 		sc->sc_ntxqueues = ncpu;
4211 	if (ncpu < sc->sc_nrxqueues)
4212 		sc->sc_nrxqueues = ncpu;
4213 
4214 	/* XXX Currently, this driver supports RX multiqueue only. */
4215 	sc->sc_ntxqueues = 1;
4216 }
4217 
4218 /*
4219  * Both single interrupt MSI and INTx can use this function.
4220  */
4221 static int
4222 wm_setup_legacy(struct wm_softc *sc)
4223 {
4224 	pci_chipset_tag_t pc = sc->sc_pc;
4225 	const char *intrstr = NULL;
4226 	char intrbuf[PCI_INTRSTR_LEN];
4227 	int error;
4228 
4229 	error = wm_alloc_txrx_queues(sc);
4230 	if (error) {
4231 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4232 		    error);
4233 		return ENOMEM;
4234 	}
4235 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4236 	    sizeof(intrbuf));
4237 #ifdef WM_MPSAFE
4238 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4239 #endif
4240 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4241 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4242 	if (sc->sc_ihs[0] == NULL) {
4243 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4244 		    (pci_intr_type(sc->sc_intrs[0])
4245 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4246 		return ENOMEM;
4247 	}
4248 
4249 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4250 	sc->sc_nintrs = 1;
4251 	return 0;
4252 }
4253 
4254 static int
4255 wm_setup_msix(struct wm_softc *sc)
4256 {
4257 	void *vih;
4258 	kcpuset_t *affinity;
4259 	int qidx, error, intr_idx, tx_established, rx_established;
4260 	pci_chipset_tag_t pc = sc->sc_pc;
4261 	const char *intrstr = NULL;
4262 	char intrbuf[PCI_INTRSTR_LEN];
4263 	char intr_xname[INTRDEVNAMEBUF];
4264 	/*
4265 	 * To avoid other devices' interrupts, the affinity of Tx/Rx interrupts
4266 	 * start from CPU#1.
4267 	 */
4268 	int affinity_offset = 1;
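	/*
	 * For example (hypothetical configuration): with ncpu = 4 and two
	 * TX plus two RX vectors, the round-robin below binds TX0->cpu1,
	 * TX1->cpu2, RX0->cpu3 and RX1->cpu0, while the LINK vector keeps
	 * the system default affinity.
	 */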
4269 
4270 	error = wm_alloc_txrx_queues(sc);
4271 	if (error) {
4272 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4273 		    error);
4274 		return ENOMEM;
4275 	}
4276 
4277 	kcpuset_create(&affinity, false);
4278 	intr_idx = 0;
4279 
4280 	/*
4281 	 * TX
4282 	 */
4283 	tx_established = 0;
4284 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4285 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
4286 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
4287 
4288 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4289 		    sizeof(intrbuf));
4290 #ifdef WM_MPSAFE
4291 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4292 		    PCI_INTR_MPSAFE, true);
4293 #endif
4294 		memset(intr_xname, 0, sizeof(intr_xname));
4295 		snprintf(intr_xname, sizeof(intr_xname), "%sTX%d",
4296 		    device_xname(sc->sc_dev), qidx);
4297 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4298 		    IPL_NET, wm_txintr_msix, txq, intr_xname);
4299 		if (vih == NULL) {
4300 			aprint_error_dev(sc->sc_dev,
4301 			    "unable to establish MSI-X(for TX)%s%s\n",
4302 			    intrstr ? " at " : "",
4303 			    intrstr ? intrstr : "");
4304 
4305 			goto fail_0;
4306 		}
4307 		kcpuset_zero(affinity);
4308 		/* Round-robin affinity */
4309 		kcpuset_set(affinity, affinity_to);
4310 		error = interrupt_distribute(vih, affinity, NULL);
4311 		if (error == 0) {
4312 			aprint_normal_dev(sc->sc_dev,
4313 			    "for TX interrupting at %s affinity to %u\n",
4314 			    intrstr, affinity_to);
4315 		} else {
4316 			aprint_normal_dev(sc->sc_dev,
4317 			    "for TX interrupting at %s\n", intrstr);
4318 		}
4319 		sc->sc_ihs[intr_idx] = vih;
4320 		txq->txq_id = qidx;
4321 		txq->txq_intr_idx = intr_idx;
4322 
4323 		tx_established++;
4324 		intr_idx++;
4325 	}
4326 
4327 	/*
4328 	 * RX
4329 	 */
4330 	rx_established = 0;
4331 	for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4332 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4333 		int affinity_to = (affinity_offset + intr_idx) % ncpu;
4334 
4335 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4336 		    sizeof(intrbuf));
4337 #ifdef WM_MPSAFE
4338 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4339 		    PCI_INTR_MPSAFE, true);
4340 #endif
4341 		memset(intr_xname, 0, sizeof(intr_xname));
4342 		snprintf(intr_xname, sizeof(intr_xname), "%sRX%d",
4343 		    device_xname(sc->sc_dev), qidx);
4344 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4345 		    IPL_NET, wm_rxintr_msix, rxq, intr_xname);
4346 		if (vih == NULL) {
4347 			aprint_error_dev(sc->sc_dev,
4348 			    "unable to establish MSI-X(for RX)%s%s\n",
4349 			    intrstr ? " at " : "",
4350 			    intrstr ? intrstr : "");
4351 
4352 			goto fail_1;
4353 		}
4354 		kcpuset_zero(affinity);
4355 		/* Round-robin affinity */
4356 		kcpuset_set(affinity, affinity_to);
4357 		error = interrupt_distribute(vih, affinity, NULL);
4358 		if (error == 0) {
4359 			aprint_normal_dev(sc->sc_dev,
4360 			    "for RX interrupting at %s affinity to %u\n",
4361 			    intrstr, affinity_to);
4362 		} else {
4363 			aprint_normal_dev(sc->sc_dev,
4364 			    "for RX interrupting at %s\n", intrstr);
4365 		}
4366 		sc->sc_ihs[intr_idx] = vih;
4367 		rxq->rxq_id = qidx;
4368 		rxq->rxq_intr_idx = intr_idx;
4369 
4370 		rx_established++;
4371 		intr_idx++;
4372 	}
4373 
4374 	/*
4375 	 * LINK
4376 	 */
4377 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4378 	    sizeof(intrbuf));
4379 #ifdef WM_MPSAFE
4380 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4381 #endif
4382 	memset(intr_xname, 0, sizeof(intr_xname));
4383 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4384 	    device_xname(sc->sc_dev));
4385 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4386 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4387 	if (vih == NULL) {
4388 		aprint_error_dev(sc->sc_dev,
4389 		    "unable to establish MSI-X(for LINK)%s%s\n",
4390 		    intrstr ? " at " : "",
4391 		    intrstr ? intrstr : "");
4392 
4393 		goto fail_1;
4394 	}
4395 	/* keep default affinity to LINK interrupt */
4396 	aprint_normal_dev(sc->sc_dev,
4397 	    "for LINK interrupting at %s\n", intrstr);
4398 	sc->sc_ihs[intr_idx] = vih;
4399 	sc->sc_link_intr_idx = intr_idx;
4400 
4401 	sc->sc_nintrs = sc->sc_ntxqueues + sc->sc_nrxqueues + 1;
4402 	kcpuset_destroy(affinity);
4403 	return 0;
4404 
4405  fail_1:
4406 	for (qidx = 0; qidx < rx_established; qidx++) {
4407 		struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4408 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[rxq->rxq_intr_idx]);
4409 		sc->sc_ihs[rxq->rxq_intr_idx] = NULL;
4410 	}
4411  fail_0:
4412 	for (qidx = 0; qidx < tx_established; qidx++) {
4413 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
4414 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[txq->txq_intr_idx]);
4415 		sc->sc_ihs[txq->txq_intr_idx] = NULL;
4416 	}
4417 
4418 	kcpuset_destroy(affinity);
4419 	return ENOMEM;
4420 }
4421 
4422 /*
4423  * wm_init:		[ifnet interface function]
4424  *
4425  *	Initialize the interface.
4426  */
4427 static int
4428 wm_init(struct ifnet *ifp)
4429 {
4430 	struct wm_softc *sc = ifp->if_softc;
4431 	int ret;
4432 
4433 	WM_CORE_LOCK(sc);
4434 	ret = wm_init_locked(ifp);
4435 	WM_CORE_UNLOCK(sc);
4436 
4437 	return ret;
4438 }
4439 
4440 static int
4441 wm_init_locked(struct ifnet *ifp)
4442 {
4443 	struct wm_softc *sc = ifp->if_softc;
4444 	int i, j, trynum, error = 0;
4445 	uint32_t reg;
4446 
4447 	KASSERT(WM_CORE_LOCKED(sc));
4448 	/*
4449 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4450 	 * There is a small but measurable benefit to avoiding the adjustment
4451 	 * of the descriptor so that the headers are aligned, for normal mtu,
4452 	 * on such platforms.  One possibility is that the DMA itself is
4453 	 * slightly more efficient if the front of the entire packet (instead
4454 	 * of the front of the headers) is aligned.
4455 	 *
4456 	 * Note we must always set align_tweak to 0 if we are using
4457 	 * jumbo frames.
4458 	 */
4459 #ifdef __NO_STRICT_ALIGNMENT
4460 	sc->sc_align_tweak = 0;
4461 #else
4462 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4463 		sc->sc_align_tweak = 0;
4464 	else
4465 		sc->sc_align_tweak = 2;
4466 #endif /* __NO_STRICT_ALIGNMENT */
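	/*
	 * A worked example: with the default 1500-byte MTU,
	 * 1500 + ETHER_HDR_LEN + ETHER_CRC_LEN = 1518 <= MCLBYTES - 2,
	 * so align_tweak is 2 and the 14-byte Ethernet header ends on a
	 * 4-byte boundary, leaving the IP header aligned.
	 */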
4467 
4468 	/* Cancel any pending I/O. */
4469 	wm_stop_locked(ifp, 0);
4470 
4471 	/* update statistics before reset */
4472 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4473 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4474 
4475 	/* Reset the chip to a known state. */
4476 	wm_reset(sc);
4477 
4478 	switch (sc->sc_type) {
4479 	case WM_T_82571:
4480 	case WM_T_82572:
4481 	case WM_T_82573:
4482 	case WM_T_82574:
4483 	case WM_T_82583:
4484 	case WM_T_80003:
4485 	case WM_T_ICH8:
4486 	case WM_T_ICH9:
4487 	case WM_T_ICH10:
4488 	case WM_T_PCH:
4489 	case WM_T_PCH2:
4490 	case WM_T_PCH_LPT:
4491 		/* AMT based hardware can now take control from firmware */
4492 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4493 			wm_get_hw_control(sc);
4494 		break;
4495 	default:
4496 		break;
4497 	}
4498 
4499 	/* Init hardware bits */
4500 	wm_initialize_hardware_bits(sc);
4501 
4502 	/* Reset the PHY. */
4503 	if (sc->sc_flags & WM_F_HAS_MII)
4504 		wm_gmii_reset(sc);
4505 
4506 	/* Calculate (E)ITR value */
4507 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4508 		sc->sc_itr = 450;	/* For EITR */
4509 	} else if (sc->sc_type >= WM_T_82543) {
4510 		/*
4511 		 * Set up the interrupt throttling register (units of 256ns)
4512 		 * Note that a footnote in Intel's documentation says this
4513 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4514 		 * or 10Mbit mode.  Empirically, it appears to be the case
4515 		 * that that is also true for the 1024ns units of the other
4516 		 * interrupt-related timer registers -- so, really, we ought
4517 		 * to divide this value by 4 when the link speed is low.
4518 		 *
4519 		 * XXX implement this division at link speed change!
4520 		 */
4521 
4522 		/*
4523 		 * For N interrupts/sec, set this value to:
4524 		 * 1000000000 / (N * 256).  Note that we set the
4525 		 * absolute and packet timer values to this value
4526 		 * divided by 4 to get "simple timer" behavior.
4527 		 */
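		/*
		 * For instance, the value 1500 chosen below corresponds to
		 * roughly 1000000000 / (1500 * 256) ~= 2604 interrupts/sec
		 * at full rate (and about a quarter of that at the low
		 * link speeds noted above).
		 */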
4528 
4529 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4530 	}
4531 
4532 	error = wm_init_txrx_queues(sc);
4533 	if (error)
4534 		goto out;
4535 
4536 	/*
4537 	 * Clear out the VLAN table -- we don't use it (yet).
4538 	 */
4539 	CSR_WRITE(sc, WMREG_VET, 0);
4540 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4541 		trynum = 10; /* Due to hw errata */
4542 	else
4543 		trynum = 1;
4544 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4545 		for (j = 0; j < trynum; j++)
4546 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4547 
4548 	/*
4549 	 * Set up flow-control parameters.
4550 	 *
4551 	 * XXX Values could probably stand some tuning.
4552 	 */
4553 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4554 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4555 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)) {
4556 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4557 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4558 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4559 	}
4560 
4561 	sc->sc_fcrtl = FCRTL_DFLT;
4562 	if (sc->sc_type < WM_T_82543) {
4563 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4564 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4565 	} else {
4566 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4567 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4568 	}
4569 
4570 	if (sc->sc_type == WM_T_80003)
4571 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4572 	else
4573 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4574 
4575 	/* Writes the control register. */
4576 	wm_set_vlan(sc);
4577 
4578 	if (sc->sc_flags & WM_F_HAS_MII) {
4579 		int val;
4580 
4581 		switch (sc->sc_type) {
4582 		case WM_T_80003:
4583 		case WM_T_ICH8:
4584 		case WM_T_ICH9:
4585 		case WM_T_ICH10:
4586 		case WM_T_PCH:
4587 		case WM_T_PCH2:
4588 		case WM_T_PCH_LPT:
4589 			/*
4590 			 * Set the MAC to wait the maximum time between each
4591 			 * iteration and increase the max iterations when
4592 			 * polling the PHY; this fixes erroneous timeouts at
4593 			 * 10Mbps.
4594 			 */
4595 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4596 			    0xFFFF);
4597 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4598 			val |= 0x3F;
4599 			wm_kmrn_writereg(sc,
4600 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4601 			break;
4602 		default:
4603 			break;
4604 		}
4605 
4606 		if (sc->sc_type == WM_T_80003) {
4607 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4608 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4609 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4610 
4611 			/* Bypass RX and TX FIFO's */
4612 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4613 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4614 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4615 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4616 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4617 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4618 		}
4619 	}
4620 #if 0
4621 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4622 #endif
4623 
4624 	/* Set up checksum offload parameters. */
4625 	reg = CSR_READ(sc, WMREG_RXCSUM);
4626 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4627 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4628 		reg |= RXCSUM_IPOFL;
4629 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4630 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4631 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4632 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4633 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4634 
4635 	/* Set up MSI-X */
4636 	if (sc->sc_nintrs > 1) {
4637 		uint32_t ivar;
4638 		struct wm_txqueue *txq;
4639 		struct wm_rxqueue *rxq;
4640 		int qid;
4641 
4642 		if (sc->sc_type == WM_T_82575) {
4643 			/* Interrupt control */
4644 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4645 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4646 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4647 
4648 			/* TX */
4649 			for (i = 0; i < sc->sc_ntxqueues; i++) {
4650 				txq = &sc->sc_txq[i];
4651 				CSR_WRITE(sc, WMREG_MSIXBM(txq->txq_intr_idx),
4652 				    EITR_TX_QUEUE(txq->txq_id));
4653 			}
4654 			/* RX */
4655 			for (i = 0; i < sc->sc_nrxqueues; i++) {
4656 				rxq = &sc->sc_rxq[i];
4657 				CSR_WRITE(sc, WMREG_MSIXBM(rxq->rxq_intr_idx),
4658 				    EITR_RX_QUEUE(rxq->rxq_id));
4659 			}
4660 			/* Link status */
4661 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4662 			    EITR_OTHER);
4663 		} else if (sc->sc_type == WM_T_82574) {
4664 			/* Interrupt control */
4665 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4666 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4667 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4668 
4669 			ivar = 0;
4670 			/* TX */
4671 			for (i = 0; i < sc->sc_ntxqueues; i++) {
4672 				txq = &sc->sc_txq[i];
4673 				ivar |= __SHIFTIN((IVAR_VALID_82574
4674 					| txq->txq_intr_idx),
4675 				    IVAR_TX_MASK_Q_82574(txq->txq_id));
4676 			}
4677 			/* RX */
4678 			for (i = 0; i < sc->sc_nrxqueues; i++) {
4679 				rxq = &sc->sc_rxq[i];
4680 				ivar |= __SHIFTIN((IVAR_VALID_82574
4681 					| rxq->rxq_intr_idx),
4682 				    IVAR_RX_MASK_Q_82574(rxq->rxq_id));
4683 			}
4684 			/* Link status */
4685 			ivar |= __SHIFTIN((IVAR_VALID_82574
4686 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4687 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4688 		} else {
4689 			/* Interrupt control */
4690 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4691 			    | GPIE_EIAME | GPIE_PBA);
4692 
4693 			switch (sc->sc_type) {
4694 			case WM_T_82580:
4695 			case WM_T_I350:
4696 			case WM_T_I354:
4697 			case WM_T_I210:
4698 			case WM_T_I211:
4699 				/* TX */
4700 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4701 					txq = &sc->sc_txq[i];
4702 					qid = txq->txq_id;
4703 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4704 					ivar &= ~IVAR_TX_MASK_Q(qid);
4705 					ivar |= __SHIFTIN((txq->txq_intr_idx
4706 						| IVAR_VALID),
4707 					    IVAR_TX_MASK_Q(qid));
4708 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4709 				}
4710 
4711 				/* RX */
4712 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4713 					rxq = &sc->sc_rxq[i];
4714 					qid = rxq->rxq_id;
4715 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4716 					ivar &= ~IVAR_RX_MASK_Q(qid);
4717 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
4718 						| IVAR_VALID),
4719 					    IVAR_RX_MASK_Q(qid));
4720 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4721 				}
4722 				break;
4723 			case WM_T_82576:
4724 				/* TX */
4725 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4726 					txq = &sc->sc_txq[i];
4727 					qid = txq->txq_id;
4728 					ivar = CSR_READ(sc,
4729 					    WMREG_IVAR_Q_82576(qid));
4730 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4731 					ivar |= __SHIFTIN((txq->txq_intr_idx
4732 						| IVAR_VALID),
4733 					    IVAR_TX_MASK_Q_82576(qid));
4734 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4735 					    ivar);
4736 				}
4737 
4738 				/* RX */
4739 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4740 					rxq = &sc->sc_rxq[i];
4741 					qid = rxq->rxq_id;
4742 					ivar = CSR_READ(sc,
4743 					    WMREG_IVAR_Q_82576(qid));
4744 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4745 					ivar |= __SHIFTIN((rxq->rxq_intr_idx
4746 						| IVAR_VALID),
4747 					    IVAR_RX_MASK_Q_82576(qid));
4748 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4749 					    ivar);
4750 				}
4751 				break;
4752 			default:
4753 				break;
4754 			}
4755 
4756 			/* Link status */
4757 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4758 			    IVAR_MISC_OTHER);
4759 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4760 		}
4761 
4762 		if (sc->sc_nrxqueues > 1) {
4763 			wm_init_rss(sc);
4764 
4765 			/*
4766 			** NOTE: Receive Full-Packet Checksum Offload
4767 			** is mutually exclusive with Multiqueue. However
4768 			** this is not the same as the TCP/IP checksums, which
4769 			** still work.
4770 			*/
4771 			reg = CSR_READ(sc, WMREG_RXCSUM);
4772 			reg |= RXCSUM_PCSD;
4773 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
4774 		}
4775 	}
4776 
4777 	/* Set up the interrupt registers. */
4778 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4779 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4780 	    ICR_RXO | ICR_RXT0;
4781 	if (sc->sc_nintrs > 1) {
4782 		uint32_t mask;
4783 		struct wm_txqueue *txq;
4784 		struct wm_rxqueue *rxq;
4785 
4786 		switch (sc->sc_type) {
4787 		case WM_T_82574:
4788 			CSR_WRITE(sc, WMREG_EIAC_82574,
4789 			    WMREG_EIAC_82574_MSIX_MASK);
4790 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4791 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4792 			break;
4793 		default:
4794 			if (sc->sc_type == WM_T_82575) {
4795 				mask = 0;
4796 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4797 					txq = &sc->sc_txq[i];
4798 					mask |= EITR_TX_QUEUE(txq->txq_id);
4799 				}
4800 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4801 					rxq = &sc->sc_rxq[i];
4802 					mask |= EITR_RX_QUEUE(rxq->rxq_id);
4803 				}
4804 				mask |= EITR_OTHER;
4805 			} else {
4806 				mask = 0;
4807 				for (i = 0; i < sc->sc_ntxqueues; i++) {
4808 					txq = &sc->sc_txq[i];
4809 					mask |= 1 << txq->txq_intr_idx;
4810 				}
4811 				for (i = 0; i < sc->sc_nrxqueues; i++) {
4812 					rxq = &sc->sc_rxq[i];
4813 					mask |= 1 << rxq->rxq_intr_idx;
4814 				}
4815 				mask |= 1 << sc->sc_link_intr_idx;
4816 			}
4817 			CSR_WRITE(sc, WMREG_EIAC, mask);
4818 			CSR_WRITE(sc, WMREG_EIAM, mask);
4819 			CSR_WRITE(sc, WMREG_EIMS, mask);
4820 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4821 			break;
4822 		}
4823 	} else
4824 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4825 
4826 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4827 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4828 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
4829 		reg = CSR_READ(sc, WMREG_KABGTXD);
4830 		reg |= KABGTXD_BGSQLBIAS;
4831 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4832 	}
4833 
4834 	/* Set up the inter-packet gap. */
4835 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4836 
4837 	if (sc->sc_type >= WM_T_82543) {
4838 		/*
4839 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
4840 		 * the multi queue function with MSI-X.
4841 		 */
4842 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4843 			int qidx;
4844 			for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
4845 				struct wm_txqueue *txq = &sc->sc_txq[qidx];
4846 				CSR_WRITE(sc, WMREG_EITR(txq->txq_intr_idx),
4847 				    sc->sc_itr);
4848 			}
4849 			for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4850 				struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4851 				CSR_WRITE(sc, WMREG_EITR(rxq->rxq_intr_idx),
4852 				    sc->sc_itr);
4853 			}
4854 			/*
4855 			 * Link interrupts occur much less often than TX
4856 			 * and RX interrupts.  So, we don't tune the
4857 			 * EITR(WM_MSIX_LINKINTR_IDX) value like FreeBSD's
4858 			 * if_igb.
4859 			 */
4860 		} else
4861 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4862 	}
4863 
4864 	/* Set the VLAN ethernetype. */
4865 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4866 
4867 	/*
4868 	 * Set up the transmit control register; we start out with
4869 	 * a collision distance suitable for FDX, but update it when
4870 	 * we resolve the media type.
4871 	 */
4872 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4873 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4874 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4875 	if (sc->sc_type >= WM_T_82571)
4876 		sc->sc_tctl |= TCTL_MULR;
4877 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4878 
4879 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4880 		/* Write TDT after TCTL.EN is set. See the document. */
4881 		CSR_WRITE(sc, WMREG_TDT(0), 0);
4882 	}
4883 
4884 	if (sc->sc_type == WM_T_80003) {
4885 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4886 		reg &= ~TCTL_EXT_GCEX_MASK;
4887 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4888 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4889 	}
4890 
4891 	/* Set the media. */
4892 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4893 		goto out;
4894 
4895 	/* Configure for OS presence */
4896 	wm_init_manageability(sc);
4897 
4898 	/*
4899 	 * Set up the receive control register; we actually program
4900 	 * the register when we set the receive filter.  Use multicast
4901 	 * address offset type 0.
4902 	 *
4903 	 * Only the i82544 has the ability to strip the incoming
4904 	 * CRC, so we don't enable that feature.
4905 	 */
4906 	sc->sc_mchash_type = 0;
4907 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4908 	    | RCTL_MO(sc->sc_mchash_type);
4909 
4910 	/*
4911 	 * The I350 has a bug where it always strips the CRC whether
4912 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
4913 	 */
4914 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4915 	    || (sc->sc_type == WM_T_I210))
4916 		sc->sc_rctl |= RCTL_SECRC;
4917 
4918 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4919 	    && (ifp->if_mtu > ETHERMTU)) {
4920 		sc->sc_rctl |= RCTL_LPE;
4921 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4922 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4923 	}
4924 
4925 	if (MCLBYTES == 2048) {
4926 		sc->sc_rctl |= RCTL_2k;
4927 	} else {
4928 		if (sc->sc_type >= WM_T_82543) {
4929 			switch (MCLBYTES) {
4930 			case 4096:
4931 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4932 				break;
4933 			case 8192:
4934 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4935 				break;
4936 			case 16384:
4937 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4938 				break;
4939 			default:
4940 				panic("wm_init: MCLBYTES %d unsupported",
4941 				    MCLBYTES);
4942 				break;
4943 			}
4944 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4945 	}
4946 
4947 	/* Set the receive filter. */
4948 	wm_set_filter(sc);
4949 
4950 	/* Enable ECC */
4951 	switch (sc->sc_type) {
4952 	case WM_T_82571:
4953 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4954 		reg |= PBA_ECC_CORR_EN;
4955 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4956 		break;
4957 	case WM_T_PCH_LPT:
4958 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4959 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4960 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4961 
4962 		reg = CSR_READ(sc, WMREG_CTRL);
4963 		reg |= CTRL_MEHE;
4964 		CSR_WRITE(sc, WMREG_CTRL, reg);
4965 		break;
4966 	default:
4967 		break;
4968 	}
4969 
4970 	/* On 575 and later set RDT only if RX enabled */
4971 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4972 		int qidx;
4973 		for (qidx = 0; qidx < sc->sc_nrxqueues; qidx++) {
4974 			struct wm_rxqueue *rxq = &sc->sc_rxq[qidx];
4975 			for (i = 0; i < WM_NRXDESC; i++) {
4976 				WM_RX_LOCK(rxq);
4977 				wm_init_rxdesc(rxq, i);
4978 				WM_RX_UNLOCK(rxq);
4979 
4980 			}
4981 		}
4982 	}
4983 
4984 	sc->sc_stopping = false;
4985 
4986 	/* Start the one second link check clock. */
4987 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
4988 
4989 	/* ...all done! */
4990 	ifp->if_flags |= IFF_RUNNING;
4991 	ifp->if_flags &= ~IFF_OACTIVE;
4992 
4993  out:
4994 	sc->sc_if_flags = ifp->if_flags;
4995 	if (error)
4996 		log(LOG_ERR, "%s: interface not running\n",
4997 		    device_xname(sc->sc_dev));
4998 	return error;
4999 }
5000 
5001 /*
5002  * wm_stop:		[ifnet interface function]
5003  *
5004  *	Stop transmission on the interface.
5005  */
5006 static void
5007 wm_stop(struct ifnet *ifp, int disable)
5008 {
5009 	struct wm_softc *sc = ifp->if_softc;
5010 
5011 	WM_CORE_LOCK(sc);
5012 	wm_stop_locked(ifp, disable);
5013 	WM_CORE_UNLOCK(sc);
5014 }
5015 
5016 static void
5017 wm_stop_locked(struct ifnet *ifp, int disable)
5018 {
5019 	struct wm_softc *sc = ifp->if_softc;
5020 	struct wm_txsoft *txs;
5021 	int i, qidx;
5022 
5023 	KASSERT(WM_CORE_LOCKED(sc));
5024 
5025 	sc->sc_stopping = true;
5026 
5027 	/* Stop the one second clock. */
5028 	callout_stop(&sc->sc_tick_ch);
5029 
5030 	/* Stop the 82547 Tx FIFO stall check timer. */
5031 	if (sc->sc_type == WM_T_82547)
5032 		callout_stop(&sc->sc_txfifo_ch);
5033 
5034 	if (sc->sc_flags & WM_F_HAS_MII) {
5035 		/* Down the MII. */
5036 		mii_down(&sc->sc_mii);
5037 	} else {
5038 #if 0
5039 		/* Should we clear PHY's status properly? */
5040 		wm_reset(sc);
5041 #endif
5042 	}
5043 
5044 	/* Stop the transmit and receive processes. */
5045 	CSR_WRITE(sc, WMREG_TCTL, 0);
5046 	CSR_WRITE(sc, WMREG_RCTL, 0);
5047 	sc->sc_rctl &= ~RCTL_EN;
5048 
5049 	/*
5050 	 * Clear the interrupt mask to ensure the device cannot assert its
5051 	 * interrupt line.
5052 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5053 	 * service any currently pending or shared interrupt.
5054 	 */
5055 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5056 	sc->sc_icr = 0;
5057 	if (sc->sc_nintrs > 1) {
5058 		if (sc->sc_type != WM_T_82574) {
5059 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5060 			CSR_WRITE(sc, WMREG_EIAC, 0);
5061 		} else
5062 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5063 	}
5064 
5065 	/* Release any queued transmit buffers. */
5066 	for (qidx = 0; qidx < sc->sc_ntxqueues; qidx++) {
5067 		struct wm_txqueue *txq = &sc->sc_txq[qidx];
5068 		WM_TX_LOCK(txq);
5069 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5070 			txs = &txq->txq_soft[i];
5071 			if (txs->txs_mbuf != NULL) {
5072 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5073 				m_freem(txs->txs_mbuf);
5074 				txs->txs_mbuf = NULL;
5075 			}
5076 		}
5077 		WM_TX_UNLOCK(txq);
5078 	}
5079 
5080 	/* Mark the interface as down and cancel the watchdog timer. */
5081 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5082 	ifp->if_timer = 0;
5083 
5084 	if (disable) {
5085 		for (i = 0; i < sc->sc_nrxqueues; i++) {
5086 			struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5087 			WM_RX_LOCK(rxq);
5088 			wm_rxdrain(rxq);
5089 			WM_RX_UNLOCK(rxq);
5090 		}
5091 	}
5092 
5093 #if 0 /* notyet */
5094 	if (sc->sc_type >= WM_T_82544)
5095 		CSR_WRITE(sc, WMREG_WUC, 0);
5096 #endif
5097 }
5098 
5099 static void
5100 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5101 {
5102 	struct mbuf *m;
5103 	int i;
5104 
5105 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5106 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5107 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5108 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5109 		    m->m_data, m->m_len, m->m_flags);
5110 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5111 	    i, i == 1 ? "" : "s");
5112 }
5113 
5114 /*
5115  * wm_82547_txfifo_stall:
5116  *
5117  *	Callout used to wait for the 82547 Tx FIFO to drain,
5118  *	reset the FIFO pointers, and restart packet transmission.
5119  */
5120 static void
5121 wm_82547_txfifo_stall(void *arg)
5122 {
5123 	struct wm_softc *sc = arg;
5124 	struct wm_txqueue *txq = sc->sc_txq;
5125 #ifndef WM_MPSAFE
5126 	int s;
5127 
5128 	s = splnet();
5129 #endif
5130 	WM_TX_LOCK(txq);
5131 
5132 	if (sc->sc_stopping)
5133 		goto out;
5134 
5135 	if (txq->txq_fifo_stall) {
5136 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5137 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5138 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5139 			/*
5140 			 * Packets have drained.  Stop transmitter, reset
5141 			 * FIFO pointers, restart transmitter, and kick
5142 			 * the packet queue.
5143 			 */
5144 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5145 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5146 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5147 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5148 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5149 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5150 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5151 			CSR_WRITE_FLUSH(sc);
5152 
5153 			txq->txq_fifo_head = 0;
5154 			txq->txq_fifo_stall = 0;
5155 			wm_start_locked(&sc->sc_ethercom.ec_if);
5156 		} else {
5157 			/*
5158 			 * Still waiting for packets to drain; try again in
5159 			 * another tick.
5160 			 */
5161 			callout_schedule(&sc->sc_txfifo_ch, 1);
5162 		}
5163 	}
5164 
5165 out:
5166 	WM_TX_UNLOCK(txq);
5167 #ifndef WM_MPSAFE
5168 	splx(s);
5169 #endif
5170 }
5171 
5172 /*
5173  * wm_82547_txfifo_bugchk:
5174  *
5175  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5176  *	prevent enqueueing a packet that would wrap around the end
5177  *	of the Tx FIFO ring buffer; otherwise the chip will croak.
5178  *
5179  *	We do this by checking the amount of space before the end
5180  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5181  *	the Tx FIFO, wait for all remaining packets to drain, reset
5182  *	the internal FIFO pointers to the beginning, and restart
5183  *	transmission on the interface.
5184  */
5185 #define	WM_FIFO_HDR		0x10
5186 #define	WM_82547_PAD_LEN	0x3e0
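/*
 * WM_FIFO_HDR (16 bytes) is the granularity in which the 82547 accounts
 * Tx FIFO space; WM_82547_PAD_LEN (0x3e0 == 992 bytes) is the safety
 * margin the check keeps between a padded packet and the end of the
 * FIFO.  Both values match the Linux e1000 driver's constants.
 */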
5187 static int
5188 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5189 {
5190 	struct wm_txqueue *txq = &sc->sc_txq[0];
5191 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5192 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
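	/*
	 * Example: a maximum-size 1514-byte frame plus the 16-byte FIFO
	 * header is 1530 bytes, which rounds up to 1536 bytes, the amount
	 * of FIFO space the packet will actually consume.
	 */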
5193 
5194 	/* Just return if already stalled. */
5195 	if (txq->txq_fifo_stall)
5196 		return 1;
5197 
5198 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5199 		/* Stall only occurs in half-duplex mode. */
5200 		goto send_packet;
5201 	}
5202 
5203 	if (len >= WM_82547_PAD_LEN + space) {
5204 		txq->txq_fifo_stall = 1;
5205 		callout_schedule(&sc->sc_txfifo_ch, 1);
5206 		return 1;
5207 	}
5208 
5209  send_packet:
5210 	txq->txq_fifo_head += len;
5211 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5212 		txq->txq_fifo_head -= txq->txq_fifo_size;
5213 
5214 	return 0;
5215 }
5216 
5217 static int
5218 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5219 {
5220 	int error;
5221 
5222 	/*
5223 	 * Allocate the control data structures, and create and load the
5224 	 * DMA map for it.
5225 	 *
5226 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5227 	 * memory.  So must Rx descriptors.  We simplify by allocating
5228 	 * both sets within the same 4G segment.
5229 	 */
5230 	if (sc->sc_type < WM_T_82544) {
5231 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5232 		txq->txq_desc_size = sizeof(wiseman_txdesc_t) * WM_NTXDESC(txq);
5233 	} else {
5234 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5235 		txq->txq_desc_size = sizeof(txdescs_t);
5236 	}
5237 
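	/*
	 * The (bus_size_t)0x100000000ULL boundary argument keeps the whole
	 * descriptor block from crossing a 4GB boundary; this is how the
	 * same-4G-segment requirement noted above is enforced.
	 */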
5238 	if ((error = bus_dmamem_alloc(sc->sc_dmat, txq->txq_desc_size,
5239 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5240 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5241 		aprint_error_dev(sc->sc_dev,
5242 		    "unable to allocate TX control data, error = %d\n",
5243 		    error);
5244 		goto fail_0;
5245 	}
5246 
5247 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5248 		    txq->txq_desc_rseg, txq->txq_desc_size,
5249 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5250 		aprint_error_dev(sc->sc_dev,
5251 		    "unable to map TX control data, error = %d\n", error);
5252 		goto fail_1;
5253 	}
5254 
5255 	if ((error = bus_dmamap_create(sc->sc_dmat, txq->txq_desc_size, 1,
5256 		    txq->txq_desc_size, 0, 0, &txq->txq_desc_dmamap)) != 0) {
5257 		aprint_error_dev(sc->sc_dev,
5258 		    "unable to create TX control data DMA map, error = %d\n",
5259 		    error);
5260 		goto fail_2;
5261 	}
5262 
5263 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5264 		    txq->txq_descs_u, txq->txq_desc_size, NULL, 0)) != 0) {
5265 		aprint_error_dev(sc->sc_dev,
5266 		    "unable to load TX control data DMA map, error = %d\n",
5267 		    error);
5268 		goto fail_3;
5269 	}
5270 
5271 	return 0;
5272 
5273  fail_3:
5274 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5275  fail_2:
5276 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5277 	    txq->txq_desc_size);
5278  fail_1:
5279 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5280  fail_0:
5281 	return error;
5282 }
5283 
5284 static void
5285 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5286 {
5287 
5288 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5289 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5290 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5291 	    txq->txq_desc_size);
5292 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5293 }
5294 
5295 static int
5296 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5297 {
5298 	int error;
5299 
5300 	/*
5301 	 * Allocate the control data structures, and create and load the
5302 	 * DMA map for it.
5303 	 *
5304 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5305 	 * memory.  So must Rx descriptors.  We simplify by allocating
5306 	 * both sets within the same 4G segment.
5307 	 */
5308 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5309 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5310 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5311 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5312 		aprint_error_dev(sc->sc_dev,
5313 		    "unable to allocate RX control data, error = %d\n",
5314 		    error);
5315 		goto fail_0;
5316 	}
5317 
5318 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5319 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5320 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5321 		aprint_error_dev(sc->sc_dev,
5322 		    "unable to map RX control data, error = %d\n", error);
5323 		goto fail_1;
5324 	}
5325 
5326 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5327 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5328 		aprint_error_dev(sc->sc_dev,
5329 		    "unable to create RX control data DMA map, error = %d\n",
5330 		    error);
5331 		goto fail_2;
5332 	}
5333 
5334 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5335 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5336 		aprint_error_dev(sc->sc_dev,
5337 		    "unable to load RX control data DMA map, error = %d\n",
5338 		    error);
5339 		goto fail_3;
5340 	}
5341 
5342 	return 0;
5343 
5344  fail_3:
5345 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5346  fail_2:
5347 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5348 	    rxq->rxq_desc_size);
5349  fail_1:
5350 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5351  fail_0:
5352 	return error;
5353 }
5354 
5355 static void
5356 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5357 {
5358 
5359 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5360 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5361 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5362 	    rxq->rxq_desc_size);
5363 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5364 }
5365 
5366 
5367 static int
5368 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5369 {
5370 	int i, error;
5371 
5372 	/* Create the transmit buffer DMA maps. */
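	/*
	 * The 82547 presumably gets the shorter job queue so that less
	 * data is outstanding while the Tx FIFO stall workaround (see
	 * wm_82547_txfifo_bugchk()) waits for the FIFO to drain.
	 */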
5373 	WM_TXQUEUELEN(txq) =
5374 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5375 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5376 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5377 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5378 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5379 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5380 			aprint_error_dev(sc->sc_dev,
5381 			    "unable to create Tx DMA map %d, error = %d\n",
5382 			    i, error);
5383 			goto fail;
5384 		}
5385 	}
5386 
5387 	return 0;
5388 
5389  fail:
5390 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5391 		if (txq->txq_soft[i].txs_dmamap != NULL)
5392 			bus_dmamap_destroy(sc->sc_dmat,
5393 			    txq->txq_soft[i].txs_dmamap);
5394 	}
5395 	return error;
5396 }
5397 
5398 static void
5399 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5400 {
5401 	int i;
5402 
5403 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5404 		if (txq->txq_soft[i].txs_dmamap != NULL)
5405 			bus_dmamap_destroy(sc->sc_dmat,
5406 			    txq->txq_soft[i].txs_dmamap);
5407 	}
5408 }
5409 
5410 static int
5411 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5412 {
5413 	int i, error;
5414 
5415 	/* Create the receive buffer DMA maps. */
5416 	for (i = 0; i < WM_NRXDESC; i++) {
5417 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5418 			    MCLBYTES, 0, 0,
5419 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5420 			aprint_error_dev(sc->sc_dev,
5421 			    "unable to create Rx DMA map %d error = %d\n",
5422 			    i, error);
5423 			goto fail;
5424 		}
5425 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5426 	}
5427 
5428 	return 0;
5429 
5430  fail:
5431 	for (i = 0; i < WM_NRXDESC; i++) {
5432 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5433 			bus_dmamap_destroy(sc->sc_dmat,
5434 			    rxq->rxq_soft[i].rxs_dmamap);
5435 	}
5436 	return error;
5437 }
5438 
5439 static void
5440 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5441 {
5442 	int i;
5443 
5444 	for (i = 0; i < WM_NRXDESC; i++) {
5445 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5446 			bus_dmamap_destroy(sc->sc_dmat,
5447 			    rxq->rxq_soft[i].rxs_dmamap);
5448 	}
5449 }
5450 
5451 /*
5452  * wm_alloc_txrx_queues:
5453  *	Allocate {tx,rx}descs and {tx,rx} buffers
5454  */
5455 static int
5456 wm_alloc_txrx_queues(struct wm_softc *sc)
5457 {
5458 	int i, error, tx_done, rx_done;
5459 
5460 	/*
5461 	 * For transmission
5462 	 */
5463 	sc->sc_txq = kmem_zalloc(sizeof(struct wm_txqueue) * sc->sc_ntxqueues,
5464 	    KM_SLEEP);
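	/*
	 * Note that kmem_zalloc(9) with KM_SLEEP sleeps until the
	 * allocation succeeds rather than returning NULL, so the check
	 * below is purely defensive.
	 */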
5465 	if (sc->sc_txq == NULL) {
5466 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_txqueue\n");
5467 		error = ENOMEM;
5468 		goto fail_0;
5469 	}
5470 
5471 	error = 0;
5472 	tx_done = 0;
5473 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5474 		struct wm_txqueue *txq = &sc->sc_txq[i];
5475 		txq->txq_sc = sc;
5476 #ifdef WM_MPSAFE
5477 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5478 #else
5479 		txq->txq_lock = NULL;
5480 #endif
5481 		error = wm_alloc_tx_descs(sc, txq);
5482 		if (error)
5483 			break;
5484 		error = wm_alloc_tx_buffer(sc, txq);
5485 		if (error) {
5486 			wm_free_tx_descs(sc, txq);
5487 			break;
5488 		}
5489 		tx_done++;
5490 	}
5491 	if (error)
5492 		goto fail_1;
5493 
5494 	/*
5495 	 * For receive
5496 	 */
5497 	sc->sc_rxq = kmem_zalloc(sizeof(struct wm_rxqueue) * sc->sc_nrxqueues,
5498 	    KM_SLEEP);
5499 	if (sc->sc_rxq == NULL) {
5500 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_rxqueue\n");
5501 		error = ENOMEM;
5502 		goto fail_1;
5503 	}
5504 
5505 	error = 0;
5506 	rx_done = 0;
5507 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5508 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5509 		rxq->rxq_sc = sc;
5510 #ifdef WM_MPSAFE
5511 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5512 #else
5513 		rxq->rxq_lock = NULL;
5514 #endif
5515 		error = wm_alloc_rx_descs(sc, rxq);
5516 		if (error)
5517 			break;
5518 
5519 		error = wm_alloc_rx_buffer(sc, rxq);
5520 		if (error) {
5521 			wm_free_rx_descs(sc, rxq);
5522 			break;
5523 		}
5524 
5525 		rx_done++;
5526 	}
5527 	if (error)
5528 		goto fail_2;
5529 
5530 	return 0;
5531 
5532  fail_2:
5533 	for (i = 0; i < rx_done; i++) {
5534 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5535 		wm_free_rx_buffer(sc, rxq);
5536 		wm_free_rx_descs(sc, rxq);
5537 		if (rxq->rxq_lock)
5538 			mutex_obj_free(rxq->rxq_lock);
5539 	}
5540 	kmem_free(sc->sc_rxq,
5541 	    sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5542  fail_1:
5543 	for (i = 0; i < tx_done; i++) {
5544 		struct wm_txqueue *txq = &sc->sc_txq[i];
5545 		wm_free_tx_buffer(sc, txq);
5546 		wm_free_tx_descs(sc, txq);
5547 		if (txq->txq_lock)
5548 			mutex_obj_free(txq->txq_lock);
5549 	}
5550 	kmem_free(sc->sc_txq,
5551 	    sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5552  fail_0:
5553 	return error;
5554 }
5555 
5556 /*
5557  * wm_free_txrx_queues:
5558  *	Free {tx,rx}descs and {tx,rx} buffers
5559  */
5560 static void
5561 wm_free_txrx_queues(struct wm_softc *sc)
5562 {
5563 	int i;
5564 
5565 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5566 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5567 		wm_free_rx_buffer(sc, rxq);
5568 		wm_free_rx_descs(sc, rxq);
5569 		if (rxq->rxq_lock)
5570 			mutex_obj_free(rxq->rxq_lock);
5571 	}
5572 	kmem_free(sc->sc_rxq, sizeof(struct wm_rxqueue) * sc->sc_nrxqueues);
5573 
5574 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5575 		struct wm_txqueue *txq = &sc->sc_txq[i];
5576 		wm_free_tx_buffer(sc, txq);
5577 		wm_free_tx_descs(sc, txq);
5578 		if (txq->txq_lock)
5579 			mutex_obj_free(txq->txq_lock);
5580 	}
5581 	kmem_free(sc->sc_txq, sizeof(struct wm_txqueue) * sc->sc_ntxqueues);
5582 }
5583 
5584 static void
5585 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5586 {
5587 
5588 	KASSERT(WM_TX_LOCKED(txq));
5589 
5590 	/* Initialize the transmit descriptor ring. */
5591 	memset(txq->txq_descs, 0, WM_TXDESCSIZE(txq));
5592 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5593 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5594 	txq->txq_free = WM_NTXDESC(txq);
5595 	txq->txq_next = 0;
5596 }
5597 
5598 static void
5599 wm_init_tx_regs(struct wm_softc *sc, struct wm_txqueue *txq)
5600 {
5601 
5602 	KASSERT(WM_TX_LOCKED(txq));
5603 
5604 	if (sc->sc_type < WM_T_82543) {
5605 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5606 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5607 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCSIZE(txq));
5608 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5609 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5610 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5611 	} else {
5612 		int qid = txq->txq_id;
5613 
5614 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5615 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5616 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCSIZE(txq));
5617 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5618 
5619 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5620 			/*
5621 			 * Don't write TDT before TCTL.EN is set.
5622 			 * See the documentation.
5623 			 */
5624 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5625 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5626 			    | TXDCTL_WTHRESH(0));
5627 		else {
5628 			/* ITR / 4 */
5629 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5630 			if (sc->sc_type >= WM_T_82540) {
5631 				/* Should be the same value as TIDV. */
5632 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5633 			}
5634 
5635 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5636 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5637 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5638 		}
5639 	}
5640 }
5641 
5642 static void
5643 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5644 {
5645 	int i;
5646 
5647 	KASSERT(WM_TX_LOCKED(txq));
5648 
5649 	/* Initialize the transmit job descriptors. */
5650 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5651 		txq->txq_soft[i].txs_mbuf = NULL;
5652 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5653 	txq->txq_snext = 0;
5654 	txq->txq_sdirty = 0;
5655 }
5656 
5657 static void
5658 wm_init_tx_queue(struct wm_softc *sc, struct wm_txqueue *txq)
5659 {
5660 
5661 	KASSERT(WM_TX_LOCKED(txq));
5662 
5663 	/*
5664 	 * Set up some register offsets that are different between
5665 	 * the i82542 and the i82543 and later chips.
5666 	 */
5667 	if (sc->sc_type < WM_T_82543)
5668 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5669 	else
5670 		txq->txq_tdt_reg = WMREG_TDT(txq->txq_id);
5671 
5672 	wm_init_tx_descs(sc, txq);
5673 	wm_init_tx_regs(sc, txq);
5674 	wm_init_tx_buffer(sc, txq);
5675 }
5676 
5677 static void
5678 wm_init_rx_regs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5679 {
5680 
5681 	KASSERT(WM_RX_LOCKED(rxq));
5682 
5683 	/*
5684 	 * Initialize the receive descriptor and receive job
5685 	 * descriptor rings.
5686 	 */
5687 	if (sc->sc_type < WM_T_82543) {
5688 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5689 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5690 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5691 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5692 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5693 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
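		/*
		 * Receive delay timer: 28 units (1.024 us each, assuming
		 * the usual RDTR granularity), with FPD set to flush any
		 * partially-filled descriptor when the timer expires.
		 */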
5694 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5695 
5696 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5697 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5698 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5699 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5700 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5701 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5702 	} else {
5703 		int qid = rxq->rxq_id;
5704 
5705 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5706 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5707 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5708 
5709 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
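			/*
			 * SRRCTL expresses the buffer size in units of
			 * 1 << SRRCTL_BSIZEPKT_SHIFT bytes (presumably 1KB),
			 * so the cluster size must be a multiple of that.
			 */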
5710 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5711 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5712 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5713 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5714 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5715 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5716 			    | RXDCTL_WTHRESH(1));
5717 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5718 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5719 		} else {
5720 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5721 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5722 			/* ITR / 4 */
5723 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5724 			/* MUST be the same value as RDTR. */
5725 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5726 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5727 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5728 		}
5729 	}
5730 }
5731 
5732 static int
5733 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5734 {
5735 	struct wm_rxsoft *rxs;
5736 	int error, i;
5737 
5738 	KASSERT(WM_RX_LOCKED(rxq));
5739 
5740 	for (i = 0; i < WM_NRXDESC; i++) {
5741 		rxs = &rxq->rxq_soft[i];
5742 		if (rxs->rxs_mbuf == NULL) {
5743 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5744 				log(LOG_ERR, "%s: unable to allocate or map "
5745 				    "rx buffer %d, error = %d\n",
5746 				    device_xname(sc->sc_dev), i, error);
5747 				/*
5748 				 * XXX Should attempt to run with fewer receive
5749 				 * XXX buffers instead of just failing.
5750 				 */
5751 				wm_rxdrain(rxq);
5752 				return ENOMEM;
5753 			}
5754 		} else {
5755 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5756 				wm_init_rxdesc(rxq, i);
5757 			/*
5758 			 * For 82575 and newer devices, the RX descriptors
5759 			 * must be initialized after the setting of RCTL.EN in
5760 			 * wm_set_filter()
5761 			 */
5762 		}
5763 	}
5764 	rxq->rxq_ptr = 0;
5765 	rxq->rxq_discard = 0;
5766 	WM_RXCHAIN_RESET(rxq);
5767 
5768 	return 0;
5769 }
5770 
5771 static int
5772 wm_init_rx_queue(struct wm_softc *sc, struct wm_rxqueue *rxq)
5773 {
5774 
5775 	KASSERT(WM_RX_LOCKED(rxq));
5776 
5777 	/*
5778 	 * Set up some register offsets that are different between
5779 	 * the i82542 and the i82543 and later chips.
5780 	 */
5781 	if (sc->sc_type < WM_T_82543)
5782 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5783 	else
5784 		rxq->rxq_rdt_reg = WMREG_RDT(rxq->rxq_id);
5785 
5786 	wm_init_rx_regs(sc, rxq);
5787 	return wm_init_rx_buffer(sc, rxq);
5788 }
5789 
5790 /*
5791  * wm_init_txrx_queues:
5792  *	Initialize {tx,rx}descs and {tx,rx} buffers
5793  */
5794 static int
5795 wm_init_txrx_queues(struct wm_softc *sc)
5796 {
5797 	int i, error;
5798 
5799 	for (i = 0; i < sc->sc_ntxqueues; i++) {
5800 		struct wm_txqueue *txq = &sc->sc_txq[i];
5801 		WM_TX_LOCK(txq);
5802 		wm_init_tx_queue(sc, txq);
5803 		WM_TX_UNLOCK(txq);
5804 	}
5805 
5806 	error = 0;
5807 	for (i = 0; i < sc->sc_nrxqueues; i++) {
5808 		struct wm_rxqueue *rxq = &sc->sc_rxq[i];
5809 		WM_RX_LOCK(rxq);
5810 		error = wm_init_rx_queue(sc, rxq);
5811 		WM_RX_UNLOCK(rxq);
5812 		if (error)
5813 			break;
5814 	}
5815 
5816 	return error;
5817 }
5818 
5819 /*
5820  * wm_tx_offload:
5821  *
5822  *	Set up TCP/IP checksumming parameters for the
5823  *	specified packet.
5824  */
5825 static int
5826 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5827     uint8_t *fieldsp)
5828 {
5829 	struct wm_txqueue *txq = &sc->sc_txq[0];
5830 	struct mbuf *m0 = txs->txs_mbuf;
5831 	struct livengood_tcpip_ctxdesc *t;
5832 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
5833 	uint32_t ipcse;
5834 	struct ether_header *eh;
5835 	int offset, iphl;
5836 	uint8_t fields;
5837 
5838 	/*
5839 	 * XXX It would be nice if the mbuf pkthdr had offset
5840 	 * fields for the protocol headers.
5841 	 */
5842 
5843 	eh = mtod(m0, struct ether_header *);
5844 	switch (htons(eh->ether_type)) {
5845 	case ETHERTYPE_IP:
5846 	case ETHERTYPE_IPV6:
5847 		offset = ETHER_HDR_LEN;
5848 		break;
5849 
5850 	case ETHERTYPE_VLAN:
5851 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5852 		break;
5853 
5854 	default:
5855 		/*
5856 		 * Don't support this protocol or encapsulation.
5857 		 */
5858 		*fieldsp = 0;
5859 		*cmdp = 0;
5860 		return 0;
5861 	}
5862 
5863 	if ((m0->m_pkthdr.csum_flags &
5864 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
5865 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5866 	} else {
5867 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5868 	}
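	/* ipcse is the inclusive offset of the last IP header byte. */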
5869 	ipcse = offset + iphl - 1;
5870 
5871 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5872 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5873 	seg = 0;
5874 	fields = 0;
5875 
5876 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5877 		int hlen = offset + iphl;
5878 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5879 
5880 		if (__predict_false(m0->m_len <
5881 				    (hlen + sizeof(struct tcphdr)))) {
5882 			/*
5883 			 * TCP/IP headers are not in the first mbuf; we need
5884 			 * to do this the slow and painful way.  Let's just
5885 			 * hope this doesn't happen very often.
5886 			 */
5887 			struct tcphdr th;
5888 
5889 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5890 
5891 			m_copydata(m0, hlen, sizeof(th), &th);
5892 			if (v4) {
5893 				struct ip ip;
5894 
5895 				m_copydata(m0, offset, sizeof(ip), &ip);
5896 				ip.ip_len = 0;
5897 				m_copyback(m0,
5898 				    offset + offsetof(struct ip, ip_len),
5899 				    sizeof(ip.ip_len), &ip.ip_len);
5900 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5901 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5902 			} else {
5903 				struct ip6_hdr ip6;
5904 
5905 				m_copydata(m0, offset, sizeof(ip6), &ip6);
5906 				ip6.ip6_plen = 0;
5907 				m_copyback(m0,
5908 				    offset + offsetof(struct ip6_hdr, ip6_plen),
5909 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5910 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5911 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5912 			}
5913 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5914 			    sizeof(th.th_sum), &th.th_sum);
5915 
5916 			hlen += th.th_off << 2;
5917 		} else {
5918 			/*
5919 			 * TCP/IP headers are in the first mbuf; we can do
5920 			 * this the easy way.
5921 			 */
5922 			struct tcphdr *th;
5923 
5924 			if (v4) {
5925 				struct ip *ip =
5926 				    (void *)(mtod(m0, char *) + offset);
5927 				th = (void *)(mtod(m0, char *) + hlen);
5928 
5929 				ip->ip_len = 0;
5930 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5931 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5932 			} else {
5933 				struct ip6_hdr *ip6 =
5934 				    (void *)(mtod(m0, char *) + offset);
5935 				th = (void *)(mtod(m0, char *) + hlen);
5936 
5937 				ip6->ip6_plen = 0;
5938 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5939 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5940 			}
5941 			hlen += th->th_off << 2;
5942 		}
5943 
5944 		if (v4) {
5945 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
5946 			cmdlen |= WTX_TCPIP_CMD_IP;
5947 		} else {
5948 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
5949 			ipcse = 0;
5950 		}
5951 		cmd |= WTX_TCPIP_CMD_TSE;
5952 		cmdlen |= WTX_TCPIP_CMD_TSE |
5953 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
5954 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
5955 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
5956 	}
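
	/*
	 * For TSO, the header length and MSS computed above are handed to
	 * the hardware via the context descriptor below; the chip then
	 * replicates the protocol headers and fixes up the length and
	 * checksum fields of each segment it carves out of the payload.
	 */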
5957 
5958 	/*
5959 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
5960 	 * offload feature, if we load the context descriptor, we
5961 	 * MUST provide valid values for IPCSS and TUCSS fields.
5962 	 */
5963 
5964 	ipcs = WTX_TCPIP_IPCSS(offset) |
5965 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
5966 	    WTX_TCPIP_IPCSE(ipcse);
5967 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
5968 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
5969 		fields |= WTX_IXSM;
5970 	}
5971 
5972 	offset += iphl;
5973 
5974 	if (m0->m_pkthdr.csum_flags &
5975 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
5976 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
5977 		fields |= WTX_TXSM;
5978 		tucs = WTX_TCPIP_TUCSS(offset) |
5979 		    WTX_TCPIP_TUCSO(offset +
5980 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
5981 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5982 	} else if ((m0->m_pkthdr.csum_flags &
5983 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
5984 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
5985 		fields |= WTX_TXSM;
5986 		tucs = WTX_TCPIP_TUCSS(offset) |
5987 		    WTX_TCPIP_TUCSO(offset +
5988 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
5989 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5990 	} else {
5991 		/* Just initialize it to a valid TCP context. */
5992 		tucs = WTX_TCPIP_TUCSS(offset) |
5993 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
5994 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
5995 	}
5996 
5997 	/* Fill in the context descriptor. */
5998 	t = (struct livengood_tcpip_ctxdesc *)
5999 	    &txq->txq_descs[txq->txq_next];
6000 	t->tcpip_ipcs = htole32(ipcs);
6001 	t->tcpip_tucs = htole32(tucs);
6002 	t->tcpip_cmdlen = htole32(cmdlen);
6003 	t->tcpip_seg = htole32(seg);
6004 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6005 
6006 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6007 	txs->txs_ndesc++;
6008 
6009 	*cmdp = cmd;
6010 	*fieldsp = fields;
6011 
6012 	return 0;
6013 }
6014 
6015 /*
6016  * wm_start:		[ifnet interface function]
6017  *
6018  *	Start packet transmission on the interface.
6019  */
6020 static void
6021 wm_start(struct ifnet *ifp)
6022 {
6023 	struct wm_softc *sc = ifp->if_softc;
6024 	struct wm_txqueue *txq = &sc->sc_txq[0];
6025 
6026 	WM_TX_LOCK(txq);
6027 	if (!sc->sc_stopping)
6028 		wm_start_locked(ifp);
6029 	WM_TX_UNLOCK(txq);
6030 }
6031 
6032 static void
6033 wm_start_locked(struct ifnet *ifp)
6034 {
6035 	struct wm_softc *sc = ifp->if_softc;
6036 	struct wm_txqueue *txq = &sc->sc_txq[0];
6037 	struct mbuf *m0;
6038 	struct m_tag *mtag;
6039 	struct wm_txsoft *txs;
6040 	bus_dmamap_t dmamap;
6041 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6042 	bus_addr_t curaddr;
6043 	bus_size_t seglen, curlen;
6044 	uint32_t cksumcmd;
6045 	uint8_t cksumfields;
6046 
6047 	KASSERT(WM_TX_LOCKED(txq));
6048 
6049 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6050 		return;
6051 
6052 	/* Remember the previous number of free descriptors. */
6053 	ofree = txq->txq_free;
6054 
6055 	/*
6056 	 * Loop through the send queue, setting up transmit descriptors
6057 	 * until we drain the queue, or use up all available transmit
6058 	 * descriptors.
6059 	 */
6060 	for (;;) {
6061 		m0 = NULL;
6062 
6063 		/* Get a work queue entry. */
6064 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6065 			wm_txeof(sc);
6066 			if (txq->txq_sfree == 0) {
6067 				DPRINTF(WM_DEBUG_TX,
6068 				    ("%s: TX: no free job descriptors\n",
6069 					device_xname(sc->sc_dev)));
6070 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6071 				break;
6072 			}
6073 		}
6074 
6075 		/* Grab a packet off the queue. */
6076 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6077 		if (m0 == NULL)
6078 			break;
6079 
6080 		DPRINTF(WM_DEBUG_TX,
6081 		    ("%s: TX: have packet to transmit: %p\n",
6082 		    device_xname(sc->sc_dev), m0));
6083 
6084 		txs = &txq->txq_soft[txq->txq_snext];
6085 		dmamap = txs->txs_dmamap;
6086 
6087 		use_tso = (m0->m_pkthdr.csum_flags &
6088 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6089 
6090 		/*
6091 		 * So says the Linux driver:
6092 		 * The controller does a simple calculation to make sure
6093 		 * there is enough room in the FIFO before initiating the
6094 		 * DMA for each buffer.  The calc is:
6095 		 *	4 = ceil(buffer len / MSS)
6096 		 * To make sure we don't overrun the FIFO, adjust the max
6097 		 * buffer len if the MSS drops.
6098 		 */
6099 		dmamap->dm_maxsegsz =
6100 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6101 		    ? m0->m_pkthdr.segsz << 2
6102 		    : WTX_MAX_LEN;
6103 
6104 		/*
6105 		 * Load the DMA map.  If this fails, the packet either
6106 		 * didn't fit in the allotted number of segments, or we
6107 		 * were short on resources.  For the too-many-segments
6108 		 * case, we simply report an error and drop the packet,
6109 		 * since we can't sanely copy a jumbo packet to a single
6110 		 * buffer.
6111 		 */
6112 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6113 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6114 		if (error) {
6115 			if (error == EFBIG) {
6116 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6117 				log(LOG_ERR, "%s: Tx packet consumes too many "
6118 				    "DMA segments, dropping...\n",
6119 				    device_xname(sc->sc_dev));
6120 				wm_dump_mbuf_chain(sc, m0);
6121 				m_freem(m0);
6122 				continue;
6123 			}
6124 			/* Short on resources, just stop for now. */
6125 			DPRINTF(WM_DEBUG_TX,
6126 			    ("%s: TX: dmamap load failed: %d\n",
6127 			    device_xname(sc->sc_dev), error));
6128 			break;
6129 		}
6130 
6131 		segs_needed = dmamap->dm_nsegs;
6132 		if (use_tso) {
6133 			/* For sentinel descriptor; see below. */
6134 			segs_needed++;
6135 		}
6136 
6137 		/*
6138 		 * Ensure we have enough descriptors free to describe
6139 		 * the packet.  Note, we always reserve one descriptor
6140 		 * at the end of the ring due to the semantics of the
6141 		 * TDT register, plus one more in the event we need
6142 		 * to load offload context.
6143 		 */
6144 		if (segs_needed > txq->txq_free - 2) {
6145 			/*
6146 			 * Not enough free descriptors to transmit this
6147 			 * packet.  We haven't committed anything yet,
6148 			 * so just unload the DMA map, put the packet
6149 			 * back on the queue, and punt.  Notify the upper
6150 			 * layer that there are no more slots left.
6151 			 */
6152 			DPRINTF(WM_DEBUG_TX,
6153 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6154 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6155 			    segs_needed, txq->txq_free - 1));
6156 			ifp->if_flags |= IFF_OACTIVE;
6157 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6158 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6159 			break;
6160 		}
6161 
6162 		/*
6163 		 * Check for 82547 Tx FIFO bug.  We need to do this
6164 		 * once we know we can transmit the packet, since we
6165 		 * do some internal FIFO space accounting here.
6166 		 */
6167 		if (sc->sc_type == WM_T_82547 &&
6168 		    wm_82547_txfifo_bugchk(sc, m0)) {
6169 			DPRINTF(WM_DEBUG_TX,
6170 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6171 			    device_xname(sc->sc_dev)));
6172 			ifp->if_flags |= IFF_OACTIVE;
6173 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6174 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6175 			break;
6176 		}
6177 
6178 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6179 
6180 		DPRINTF(WM_DEBUG_TX,
6181 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6182 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6183 
6184 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6185 
6186 		/*
6187 		 * Store a pointer to the packet so that we can free it
6188 		 * later.
6189 		 *
6190 		 * Initially, we consider the number of descriptors the
6191 		 * packet uses to be the number of DMA segments.  This may be
6192 		 * incremented by 1 if we do checksum offload (a descriptor
6193 		 * is used to set the checksum context).
6194 		 */
6195 		txs->txs_mbuf = m0;
6196 		txs->txs_firstdesc = txq->txq_next;
6197 		txs->txs_ndesc = segs_needed;
6198 
6199 		/* Set up offload parameters for this packet. */
6200 		if (m0->m_pkthdr.csum_flags &
6201 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6202 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6203 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6204 			if (wm_tx_offload(sc, txs, &cksumcmd,
6205 					  &cksumfields) != 0) {
6206 				/* Error message already displayed. */
6207 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6208 				continue;
6209 			}
6210 		} else {
6211 			cksumcmd = 0;
6212 			cksumfields = 0;
6213 		}
6214 
6215 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6216 
6217 		/* Sync the DMA map. */
6218 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6219 		    BUS_DMASYNC_PREWRITE);
6220 
6221 		/* Initialize the transmit descriptor. */
6222 		for (nexttx = txq->txq_next, seg = 0;
6223 		     seg < dmamap->dm_nsegs; seg++) {
6224 			for (seglen = dmamap->dm_segs[seg].ds_len,
6225 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6226 			     seglen != 0;
6227 			     curaddr += curlen, seglen -= curlen,
6228 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6229 				curlen = seglen;
6230 
6231 				/*
6232 				 * So says the Linux driver:
6233 				 * Workaround for premature descriptor
6234 				 * write-backs in TSO mode.  Append a
6235 				 * 4-byte sentinel descriptor.
6236 				 */
6237 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6238 				    curlen > 8)
6239 					curlen -= 4;
6240 
6241 				wm_set_dma_addr(
6242 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6243 				txq->txq_descs[nexttx].wtx_cmdlen
6244 				    = htole32(cksumcmd | curlen);
6245 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6246 				    = 0;
6247 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6248 				    = cksumfields;
6249 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6250 				lasttx = nexttx;
6251 
6252 				DPRINTF(WM_DEBUG_TX,
6253 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6254 				     "len %#04zx\n",
6255 				    device_xname(sc->sc_dev), nexttx,
6256 				    (uint64_t)curaddr, curlen));
6257 			}
6258 		}
6259 
6260 		KASSERT(lasttx != -1);
6261 
6262 		/*
6263 		 * Set up the command byte on the last descriptor of
6264 		 * the packet.  If we're in the interrupt delay window,
6265 		 * delay the interrupt.
6266 		 */
6267 		txq->txq_descs[lasttx].wtx_cmdlen |=
6268 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6269 
6270 		/*
6271 		 * If VLANs are enabled and the packet has a VLAN tag, set
6272 		 * up the descriptor to encapsulate the packet for us.
6273 		 *
6274 		 * This is only valid on the last descriptor of the packet.
6275 		 */
6276 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6277 			txq->txq_descs[lasttx].wtx_cmdlen |=
6278 			    htole32(WTX_CMD_VLE);
6279 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6280 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6281 		}
6282 
6283 		txs->txs_lastdesc = lasttx;
6284 
6285 		DPRINTF(WM_DEBUG_TX,
6286 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6287 		    device_xname(sc->sc_dev),
6288 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6289 
6290 		/* Sync the descriptors we're using. */
6291 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6292 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6293 
6294 		/* Give the packet to the chip. */
6295 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6296 
6297 		DPRINTF(WM_DEBUG_TX,
6298 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6299 
6300 		DPRINTF(WM_DEBUG_TX,
6301 		    ("%s: TX: finished transmitting packet, job %d\n",
6302 		    device_xname(sc->sc_dev), txq->txq_snext));
6303 
6304 		/* Advance the tx pointer. */
6305 		txq->txq_free -= txs->txs_ndesc;
6306 		txq->txq_next = nexttx;
6307 
6308 		txq->txq_sfree--;
6309 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6310 
6311 		/* Pass the packet to any BPF listeners. */
6312 		bpf_mtap(ifp, m0);
6313 	}
6314 
6315 	if (m0 != NULL) {
6316 		ifp->if_flags |= IFF_OACTIVE;
6317 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6318 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6319 			__func__));
6320 		m_freem(m0);
6321 	}
6322 
6323 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6324 		/* No more slots; notify upper layer. */
6325 		ifp->if_flags |= IFF_OACTIVE;
6326 	}
6327 
6328 	if (txq->txq_free != ofree) {
6329 		/* Set a watchdog timer in case the chip flakes out. */
6330 		ifp->if_timer = 5;
6331 	}
6332 }
6333 
6334 /*
6335  * wm_nq_tx_offload:
6336  *
6337  *	Set up TCP/IP checksumming parameters for the
6338  *	specified packet, for NEWQUEUE devices
6339  */
6340 static int
6341 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs,
6342     uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6343 {
6344 	struct wm_txqueue *txq = &sc->sc_txq[0];
6345 	struct mbuf *m0 = txs->txs_mbuf;
6346 	struct m_tag *mtag;
6347 	uint32_t vl_len, mssidx, cmdc;
6348 	struct ether_header *eh;
6349 	int offset, iphl;
6350 
6351 	/*
6352 	 * XXX It would be nice if the mbuf pkthdr had offset
6353 	 * fields for the protocol headers.
6354 	 */
6355 	*cmdlenp = 0;
6356 	*fieldsp = 0;
6357 
6358 	eh = mtod(m0, struct ether_header *);
6359 	switch (htons(eh->ether_type)) {
6360 	case ETHERTYPE_IP:
6361 	case ETHERTYPE_IPV6:
6362 		offset = ETHER_HDR_LEN;
6363 		break;
6364 
6365 	case ETHERTYPE_VLAN:
6366 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6367 		break;
6368 
6369 	default:
6370 		/* Don't support this protocol or encapsulation. */
6371 		*do_csum = false;
6372 		return 0;
6373 	}
6374 	*do_csum = true;
6375 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6376 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6377 
6378 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6379 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
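
	/*
	 * vl_len packs the MAC header length, IP header length and VLAN
	 * tag into a single 32-bit context word; the remaining fields are
	 * OR'd in below as they are computed.
	 */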
6380 
6381 	if ((m0->m_pkthdr.csum_flags &
6382 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6383 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6384 	} else {
6385 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6386 	}
6387 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6388 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6389 
6390 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6391 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6392 		     << NQTXC_VLLEN_VLAN_SHIFT);
6393 		*cmdlenp |= NQTX_CMD_VLE;
6394 	}
6395 
6396 	mssidx = 0;
6397 
6398 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6399 		int hlen = offset + iphl;
6400 		int tcp_hlen;
6401 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6402 
6403 		if (__predict_false(m0->m_len <
6404 				    (hlen + sizeof(struct tcphdr)))) {
6405 			/*
6406 			 * TCP/IP headers are not in the first mbuf; we need
6407 			 * to do this the slow and painful way.  Let's just
6408 			 * hope this doesn't happen very often.
6409 			 */
6410 			struct tcphdr th;
6411 
6412 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6413 
6414 			m_copydata(m0, hlen, sizeof(th), &th);
6415 			if (v4) {
6416 				struct ip ip;
6417 
6418 				m_copydata(m0, offset, sizeof(ip), &ip);
6419 				ip.ip_len = 0;
6420 				m_copyback(m0,
6421 				    offset + offsetof(struct ip, ip_len),
6422 				    sizeof(ip.ip_len), &ip.ip_len);
6423 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6424 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6425 			} else {
6426 				struct ip6_hdr ip6;
6427 
6428 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6429 				ip6.ip6_plen = 0;
6430 				m_copyback(m0,
6431 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6432 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6433 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6434 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6435 			}
6436 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6437 			    sizeof(th.th_sum), &th.th_sum);
6438 
6439 			tcp_hlen = th.th_off << 2;
6440 		} else {
6441 			/*
6442 			 * TCP/IP headers are in the first mbuf; we can do
6443 			 * this the easy way.
6444 			 */
6445 			struct tcphdr *th;
6446 
6447 			if (v4) {
6448 				struct ip *ip =
6449 				    (void *)(mtod(m0, char *) + offset);
6450 				th = (void *)(mtod(m0, char *) + hlen);
6451 
6452 				ip->ip_len = 0;
6453 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6454 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6455 			} else {
6456 				struct ip6_hdr *ip6 =
6457 				    (void *)(mtod(m0, char *) + offset);
6458 				th = (void *)(mtod(m0, char *) + hlen);
6459 
6460 				ip6->ip6_plen = 0;
6461 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6462 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6463 			}
6464 			tcp_hlen = th->th_off << 2;
6465 		}
6466 		hlen += tcp_hlen;
6467 		*cmdlenp |= NQTX_CMD_TSE;
6468 
6469 		if (v4) {
6470 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
6471 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6472 		} else {
6473 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6474 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6475 		}
6476 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6477 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6478 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6479 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6480 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6481 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6482 	} else {
6483 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6484 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6485 	}
6486 
6487 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6488 		*fieldsp |= NQTXD_FIELDS_IXSM;
6489 		cmdc |= NQTXC_CMD_IP4;
6490 	}
6491 
6492 	if (m0->m_pkthdr.csum_flags &
6493 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6494 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6495 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6496 			cmdc |= NQTXC_CMD_TCP;
6497 		} else {
6498 			cmdc |= NQTXC_CMD_UDP;
6499 		}
6500 		cmdc |= NQTXC_CMD_IP4;
6501 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6502 	}
6503 	if (m0->m_pkthdr.csum_flags &
6504 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6505 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6506 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6507 			cmdc |= NQTXC_CMD_TCP;
6508 		} else {
6509 			cmdc |= NQTXC_CMD_UDP;
6510 		}
6511 		cmdc |= NQTXC_CMD_IP6;
6512 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6513 	}
6514 
6515 	/* Fill in the context descriptor. */
6516 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6517 	    htole32(vl_len);
6518 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6519 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6520 	    htole32(cmdc);
6521 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6522 	    htole32(mssidx);
6523 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6524 	DPRINTF(WM_DEBUG_TX,
6525 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6526 	    txq->txq_next, 0, vl_len));
6527 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6528 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6529 	txs->txs_ndesc++;
6530 	return 0;
6531 }
6532 
6533 /*
6534  * wm_nq_start:		[ifnet interface function]
6535  *
6536  *	Start packet transmission on the interface for NEWQUEUE devices
6537  */
6538 static void
6539 wm_nq_start(struct ifnet *ifp)
6540 {
6541 	struct wm_softc *sc = ifp->if_softc;
6542 	struct wm_txqueue *txq = &sc->sc_txq[0];
6543 
6544 	WM_TX_LOCK(txq);
6545 	if (!sc->sc_stopping)
6546 		wm_nq_start_locked(ifp);
6547 	WM_TX_UNLOCK(txq);
6548 }
6549 
6550 static void
6551 wm_nq_start_locked(struct ifnet *ifp)
6552 {
6553 	struct wm_softc *sc = ifp->if_softc;
6554 	struct wm_txqueue *txq = &sc->sc_txq[0];
6555 	struct mbuf *m0;
6556 	struct m_tag *mtag;
6557 	struct wm_txsoft *txs;
6558 	bus_dmamap_t dmamap;
6559 	int error, nexttx, lasttx = -1, seg, segs_needed;
6560 	bool do_csum, sent;
6561 
6562 	KASSERT(WM_TX_LOCKED(txq));
6563 
6564 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6565 		return;
6566 
6567 	sent = false;
6568 
6569 	/*
6570 	 * Loop through the send queue, setting up transmit descriptors
6571 	 * until we drain the queue, or use up all available transmit
6572 	 * descriptors.
6573 	 */
6574 	for (;;) {
6575 		m0 = NULL;
6576 
6577 		/* Get a work queue entry. */
6578 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6579 			wm_txeof(sc);
6580 			if (txq->txq_sfree == 0) {
6581 				DPRINTF(WM_DEBUG_TX,
6582 				    ("%s: TX: no free job descriptors\n",
6583 					device_xname(sc->sc_dev)));
6584 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6585 				break;
6586 			}
6587 		}
6588 
6589 		/* Grab a packet off the queue. */
6590 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6591 		if (m0 == NULL)
6592 			break;
6593 
6594 		DPRINTF(WM_DEBUG_TX,
6595 		    ("%s: TX: have packet to transmit: %p\n",
6596 		    device_xname(sc->sc_dev), m0));
6597 
6598 		txs = &txq->txq_soft[txq->txq_snext];
6599 		dmamap = txs->txs_dmamap;
6600 
6601 		/*
6602 		 * Load the DMA map.  If this fails, the packet either
6603 		 * didn't fit in the allotted number of segments, or we
6604 		 * were short on resources.  For the too-many-segments
6605 		 * case, we simply report an error and drop the packet,
6606 		 * since we can't sanely copy a jumbo packet to a single
6607 		 * buffer.
6608 		 */
6609 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6610 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6611 		if (error) {
6612 			if (error == EFBIG) {
6613 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6614 				log(LOG_ERR, "%s: Tx packet consumes too many "
6615 				    "DMA segments, dropping...\n",
6616 				    device_xname(sc->sc_dev));
6617 				wm_dump_mbuf_chain(sc, m0);
6618 				m_freem(m0);
6619 				continue;
6620 			}
6621 			/* Short on resources, just stop for now. */
6622 			DPRINTF(WM_DEBUG_TX,
6623 			    ("%s: TX: dmamap load failed: %d\n",
6624 			    device_xname(sc->sc_dev), error));
6625 			break;
6626 		}
6627 
6628 		segs_needed = dmamap->dm_nsegs;
6629 
6630 		/*
6631 		 * Ensure we have enough descriptors free to describe
6632 		 * the packet.  Note, we always reserve one descriptor
6633 		 * at the end of the ring due to the semantics of the
6634 		 * TDT register, plus one more in the event we need
6635 		 * to load offload context.
6636 		 */
6637 		if (segs_needed > txq->txq_free - 2) {
6638 			/*
6639 			 * Not enough free descriptors to transmit this
6640 			 * packet.  We haven't committed anything yet,
6641 			 * so just unload the DMA map, put the packet
6642 			 * back on the queue, and punt.  Notify the upper
6643 			 * layer that there are no more slots left.
6644 			 */
6645 			DPRINTF(WM_DEBUG_TX,
6646 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6647 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6648 			    segs_needed, txq->txq_free - 1));
6649 			ifp->if_flags |= IFF_OACTIVE;
6650 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6651 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6652 			break;
6653 		}
6654 
6655 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6656 
6657 		DPRINTF(WM_DEBUG_TX,
6658 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6659 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6660 
6661 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6662 
6663 		/*
6664 		 * Store a pointer to the packet so that we can free it
6665 		 * later.
6666 		 *
6667 		 * Initially, we consider the number of descriptors the
6668 		 * packet uses to be the number of DMA segments.  This may be
6669 		 * incremented by 1 if we do checksum offload (a descriptor
6670 		 * is used to set the checksum context).
6671 		 */
6672 		txs->txs_mbuf = m0;
6673 		txs->txs_firstdesc = txq->txq_next;
6674 		txs->txs_ndesc = segs_needed;
6675 
6676 		/* Set up offload parameters for this packet. */
6677 		uint32_t cmdlen, fields, dcmdlen;
6678 		if (m0->m_pkthdr.csum_flags &
6679 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6680 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6681 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6682 			if (wm_nq_tx_offload(sc, txs, &cmdlen, &fields,
6683 			    &do_csum) != 0) {
6684 				/* Error message already displayed. */
6685 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6686 				continue;
6687 			}
6688 		} else {
6689 			do_csum = false;
6690 			cmdlen = 0;
6691 			fields = 0;
6692 		}
6693 
6694 		/* Sync the DMA map. */
6695 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6696 		    BUS_DMASYNC_PREWRITE);
6697 
6698 		/* Initialize the first transmit descriptor. */
6699 		nexttx = txq->txq_next;
6700 		if (!do_csum) {
6701 			/* setup a legacy descriptor */
6702 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6703 			    dmamap->dm_segs[0].ds_addr);
6704 			txq->txq_descs[nexttx].wtx_cmdlen =
6705 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6706 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6707 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6708 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6709 			    NULL) {
6710 				txq->txq_descs[nexttx].wtx_cmdlen |=
6711 				    htole32(WTX_CMD_VLE);
6712 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6713 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6714 			} else {
6715 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6716 			}
6717 			dcmdlen = 0;
6718 		} else {
6719 			/* setup an advanced data descriptor */
6720 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6721 			    htole64(dmamap->dm_segs[0].ds_addr);
6722 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6723 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6724 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6725 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6726 			    htole32(fields);
6727 			DPRINTF(WM_DEBUG_TX,
6728 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6729 			    device_xname(sc->sc_dev), nexttx,
6730 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
6731 			DPRINTF(WM_DEBUG_TX,
6732 			    ("\t 0x%08x%08x\n", fields,
6733 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6734 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6735 		}
6736 
6737 		lasttx = nexttx;
6738 		nexttx = WM_NEXTTX(txq, nexttx);
6739 		/*
6740 		 * Fill in the remaining descriptors.  The legacy and
6741 		 * advanced formats are identical from here on.
6742 		 */
6743 		for (seg = 1; seg < dmamap->dm_nsegs;
6744 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6745 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6746 			    htole64(dmamap->dm_segs[seg].ds_addr);
6747 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6748 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6749 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6750 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6751 			lasttx = nexttx;
6752 
6753 			DPRINTF(WM_DEBUG_TX,
6754 			    ("%s: TX: desc %d: %#" PRIx64 ", "
6755 			     "len %#04zx\n",
6756 			    device_xname(sc->sc_dev), nexttx,
6757 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
6758 			    dmamap->dm_segs[seg].ds_len));
6759 		}
6760 
6761 		KASSERT(lasttx != -1);
6762 
6763 		/*
6764 		 * Set up the command byte on the last descriptor of
6765 		 * the packet.  If we're in the interrupt delay window,
6766 		 * delay the interrupt.
6767 		 */
6768 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6769 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
6770 		txq->txq_descs[lasttx].wtx_cmdlen |=
6771 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6772 
6773 		txs->txs_lastdesc = lasttx;
6774 
6775 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
6776 		    device_xname(sc->sc_dev),
6777 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6778 
6779 		/* Sync the descriptors we're using. */
6780 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6781 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6782 
6783 		/* Give the packet to the chip. */
6784 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6785 		sent = true;
6786 
6787 		DPRINTF(WM_DEBUG_TX,
6788 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6789 
6790 		DPRINTF(WM_DEBUG_TX,
6791 		    ("%s: TX: finished transmitting packet, job %d\n",
6792 		    device_xname(sc->sc_dev), txq->txq_snext));
6793 
6794 		/* Advance the tx pointer. */
6795 		txq->txq_free -= txs->txs_ndesc;
6796 		txq->txq_next = nexttx;
6797 
6798 		txq->txq_sfree--;
6799 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6800 
6801 		/* Pass the packet to any BPF listeners. */
6802 		bpf_mtap(ifp, m0);
6803 	}
6804 
6805 	if (m0 != NULL) {
6806 		ifp->if_flags |= IFF_OACTIVE;
6807 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6808 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6809 			__func__));
6810 		m_freem(m0);
6811 	}
6812 
6813 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6814 		/* No more slots; notify upper layer. */
6815 		ifp->if_flags |= IFF_OACTIVE;
6816 	}
6817 
6818 	if (sent) {
6819 		/* Set a watchdog timer in case the chip flakes out. */
6820 		ifp->if_timer = 5;
6821 	}
6822 }
6823 
6824 /* Interrupt */
6825 
6826 /*
6827  * wm_txeof:
6828  *
6829  *	Helper; handle transmit interrupts.
6830  */
6831 static int
6832 wm_txeof(struct wm_softc *sc)
6833 {
6834 	struct wm_txqueue *txq = &sc->sc_txq[0];
6835 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6836 	struct wm_txsoft *txs;
6837 	bool processed = false;
6838 	int count = 0;
6839 	int i;
6840 	uint8_t status;
6841 
6842 	if (sc->sc_stopping)
6843 		return 0;
6844 
6845 	ifp->if_flags &= ~IFF_OACTIVE;
6846 
6847 	/*
6848 	 * Go through the Tx list and free mbufs for those
6849 	 * frames which have been transmitted.
6850 	 */
6851 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6852 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6853 		txs = &txq->txq_soft[i];
6854 
6855 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
6856 			device_xname(sc->sc_dev), i));
6857 
6858 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6859 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6860 
6861 		status =
6862 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
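		/*
		 * The hardware sets DD (descriptor done) once it has
		 * processed a descriptor; if DD is still clear the job is
		 * still in flight, so re-arm the sync and stop scanning.
		 */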
6863 		if ((status & WTX_ST_DD) == 0) {
6864 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6865 			    BUS_DMASYNC_PREREAD);
6866 			break;
6867 		}
6868 
6869 		processed = true;
6870 		count++;
6871 		DPRINTF(WM_DEBUG_TX,
6872 		    ("%s: TX: job %d done: descs %d..%d\n",
6873 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
6874 		    txs->txs_lastdesc));
6875 
6876 		/*
6877 		 * XXX We should probably be using the statistics
6878 		 * XXX registers, but I don't know if they exist
6879 		 * XXX on chips before the i82544.
6880 		 */
6881 
6882 #ifdef WM_EVENT_COUNTERS
6883 		if (status & WTX_ST_TU)
6884 			WM_EVCNT_INCR(&sc->sc_ev_tu);
6885 #endif /* WM_EVENT_COUNTERS */
6886 
6887 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
6888 			ifp->if_oerrors++;
6889 			if (status & WTX_ST_LC)
6890 				log(LOG_WARNING, "%s: late collision\n",
6891 				    device_xname(sc->sc_dev));
6892 			else if (status & WTX_ST_EC) {
6893 				ifp->if_collisions += 16;
6894 				log(LOG_WARNING, "%s: excessive collisions\n",
6895 				    device_xname(sc->sc_dev));
6896 			}
6897 		} else
6898 			ifp->if_opackets++;
6899 
6900 		txq->txq_free += txs->txs_ndesc;
6901 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
6902 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
6903 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
6904 		m_freem(txs->txs_mbuf);
6905 		txs->txs_mbuf = NULL;
6906 	}
6907 
6908 	/* Update the dirty transmit buffer pointer. */
6909 	txq->txq_sdirty = i;
6910 	DPRINTF(WM_DEBUG_TX,
6911 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
6912 
6913 	if (count != 0)
6914 		rnd_add_uint32(&sc->rnd_source, count);
6915 
6916 	/*
6917 	 * If there are no more pending transmissions, cancel the watchdog
6918 	 * timer.
6919 	 */
6920 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
6921 		ifp->if_timer = 0;
6922 
6923 	return processed;
6924 }
6925 
6926 /*
6927  * wm_rxeof:
6928  *
6929  *	Helper; handle receive interrupts.
6930  */
6931 static void
6932 wm_rxeof(struct wm_rxqueue *rxq)
6933 {
6934 	struct wm_softc *sc = rxq->rxq_sc;
6935 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6936 	struct wm_rxsoft *rxs;
6937 	struct mbuf *m;
6938 	int i, len;
6939 	int count = 0;
6940 	uint8_t status, errors;
6941 	uint16_t vlantag;
6942 
6943 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
6944 		rxs = &rxq->rxq_soft[i];
6945 
6946 		DPRINTF(WM_DEBUG_RX,
6947 		    ("%s: RX: checking descriptor %d\n",
6948 		    device_xname(sc->sc_dev), i));
6949 		wm_cdrxsync(rxq, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6950 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
6951 
6952 		status = rxq->rxq_descs[i].wrx_status;
6953 		errors = rxq->rxq_descs[i].wrx_errors;
6954 		len = le16toh(rxq->rxq_descs[i].wrx_len);
6955 		vlantag = rxq->rxq_descs[i].wrx_special;
6956 
6957 		if ((status & WRX_ST_DD) == 0) {
6958 			/* We have processed all of the receive descriptors. */
6959 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
6960 			break;
6961 		}
6962 
6963 		count++;
6964 		if (__predict_false(rxq->rxq_discard)) {
6965 			DPRINTF(WM_DEBUG_RX,
6966 			    ("%s: RX: discarding contents of descriptor %d\n",
6967 			    device_xname(sc->sc_dev), i));
6968 			wm_init_rxdesc(rxq, i);
6969 			if (status & WRX_ST_EOP) {
6970 				/* Reset our state. */
6971 				DPRINTF(WM_DEBUG_RX,
6972 				    ("%s: RX: resetting rxdiscard -> 0\n",
6973 				    device_xname(sc->sc_dev)));
6974 				rxq->rxq_discard = 0;
6975 			}
6976 			continue;
6977 		}
6978 
6979 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6980 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
6981 
6982 		m = rxs->rxs_mbuf;
6983 
6984 		/*
6985 		 * Add a new receive buffer to the ring, unless of
6986 		 * course the length is zero. Treat the latter as a
6987 		 * failed mapping.
6988 		 */
6989 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
6990 			/*
6991 			 * Failed, throw away what we've done so
6992 			 * far, and discard the rest of the packet.
6993 			 */
6994 			ifp->if_ierrors++;
6995 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
6996 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
6997 			wm_init_rxdesc(rxq, i);
6998 			if ((status & WRX_ST_EOP) == 0)
6999 				rxq->rxq_discard = 1;
7000 			if (rxq->rxq_head != NULL)
7001 				m_freem(rxq->rxq_head);
7002 			WM_RXCHAIN_RESET(rxq);
7003 			DPRINTF(WM_DEBUG_RX,
7004 			    ("%s: RX: Rx buffer allocation failed, "
7005 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7006 			    rxq->rxq_discard ? " (discard)" : ""));
7007 			continue;
7008 		}
7009 
7010 		m->m_len = len;
7011 		rxq->rxq_len += len;
7012 		DPRINTF(WM_DEBUG_RX,
7013 		    ("%s: RX: buffer at %p len %d\n",
7014 		    device_xname(sc->sc_dev), m->m_data, len));
7015 
7016 		/* If this is not the end of the packet, keep looking. */
7017 		if ((status & WRX_ST_EOP) == 0) {
7018 			WM_RXCHAIN_LINK(rxq, m);
7019 			DPRINTF(WM_DEBUG_RX,
7020 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7021 			    device_xname(sc->sc_dev), rxq->rxq_len));
7022 			continue;
7023 		}
7024 
7025 		/*
7026 		 * Okay, we have the entire packet now.  The chip is
7027 		 * configured to include the FCS except I350 and I21[01]
7028 		 * (not all chips can be configured to strip it),
7029 		 * so we need to trim it.
7030 		 * May need to adjust length of previous mbuf in the
7031 		 * chain if the current mbuf is too short.
7032 		 * For an eratta, the RCTL_SECRC bit in RCTL register
7033 		 * is always set in I350, so we don't trim it.
7034 		 */
7035 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7036 		    && (sc->sc_type != WM_T_I210)
7037 		    && (sc->sc_type != WM_T_I211)) {
7038 			if (m->m_len < ETHER_CRC_LEN) {
7039 				rxq->rxq_tail->m_len
7040 				    -= (ETHER_CRC_LEN - m->m_len);
7041 				m->m_len = 0;
7042 			} else
7043 				m->m_len -= ETHER_CRC_LEN;
7044 			len = rxq->rxq_len - ETHER_CRC_LEN;
7045 		} else
7046 			len = rxq->rxq_len;
7047 
7048 		WM_RXCHAIN_LINK(rxq, m);
7049 
7050 		*rxq->rxq_tailp = NULL;
7051 		m = rxq->rxq_head;
7052 
7053 		WM_RXCHAIN_RESET(rxq);
7054 
7055 		DPRINTF(WM_DEBUG_RX,
7056 		    ("%s: RX: have entire packet, len -> %d\n",
7057 		    device_xname(sc->sc_dev), len));
7058 
7059 		/* If an error occurred, update stats and drop the packet. */
7060 		if (errors &
7061 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7062 			if (errors & WRX_ER_SE)
7063 				log(LOG_WARNING, "%s: symbol error\n",
7064 				    device_xname(sc->sc_dev));
7065 			else if (errors & WRX_ER_SEQ)
7066 				log(LOG_WARNING, "%s: receive sequence error\n",
7067 				    device_xname(sc->sc_dev));
7068 			else if (errors & WRX_ER_CE)
7069 				log(LOG_WARNING, "%s: CRC error\n",
7070 				    device_xname(sc->sc_dev));
7071 			m_freem(m);
7072 			continue;
7073 		}
7074 
7075 		/* No errors.  Receive the packet. */
7076 		m->m_pkthdr.rcvif = ifp;
7077 		m->m_pkthdr.len = len;
7078 
7079 		/*
7080 		 * If VLANs are enabled, VLAN packets have been unwrapped
7081 		 * for us.  Associate the tag with the packet.
7082 		 */
		/* XXX should check for I350 and I354 */
7084 		if ((status & WRX_ST_VP) != 0) {
7085 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7086 		}
7087 
7088 		/* Set up checksum info for this packet. */
7089 		if ((status & WRX_ST_IXSM) == 0) {
7090 			if (status & WRX_ST_IPCS) {
7091 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7092 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7093 				if (errors & WRX_ER_IPE)
7094 					m->m_pkthdr.csum_flags |=
7095 					    M_CSUM_IPv4_BAD;
7096 			}
7097 			if (status & WRX_ST_TCPCS) {
7098 				/*
7099 				 * Note: we don't know if this was TCP or UDP,
7100 				 * so we just set both bits, and expect the
7101 				 * upper layers to deal.
7102 				 */
7103 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7104 				m->m_pkthdr.csum_flags |=
7105 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7106 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7107 				if (errors & WRX_ER_TCPE)
7108 					m->m_pkthdr.csum_flags |=
7109 					    M_CSUM_TCP_UDP_BAD;
7110 			}
7111 		}
7112 
7113 		ifp->if_ipackets++;
7114 
7115 		WM_RX_UNLOCK(rxq);
7116 
7117 		/* Pass this up to any BPF listeners. */
7118 		bpf_mtap(ifp, m);
7119 
7120 		/* Pass it on. */
7121 		(*ifp->if_input)(ifp, m);
7122 
7123 		WM_RX_LOCK(rxq);
7124 
7125 		if (sc->sc_stopping)
7126 			break;
7127 	}
7128 
7129 	/* Update the receive pointer. */
7130 	rxq->rxq_ptr = i;
7131 	if (count != 0)
7132 		rnd_add_uint32(&sc->rnd_source, count);
7133 
7134 	DPRINTF(WM_DEBUG_RX,
7135 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7136 }
7137 
7138 /*
7139  * wm_linkintr_gmii:
7140  *
7141  *	Helper; handle link interrupts for GMII.
7142  */
7143 static void
7144 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7145 {
7146 
7147 	KASSERT(WM_CORE_LOCKED(sc));
7148 
7149 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7150 		__func__));
7151 
7152 	if (icr & ICR_LSC) {
7153 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7154 
7155 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7156 			wm_gig_downshift_workaround_ich8lan(sc);
7157 
7158 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7159 			device_xname(sc->sc_dev)));
7160 		mii_pollstat(&sc->sc_mii);
7161 		if (sc->sc_type == WM_T_82543) {
7162 			int miistatus, active;
7163 
7164 			/*
7165 			 * With 82543, we need to force speed and
7166 			 * duplex on the MAC equal to what the PHY
7167 			 * speed and duplex configuration is.
7168 			 */
7169 			miistatus = sc->sc_mii.mii_media_status;
7170 
7171 			if (miistatus & IFM_ACTIVE) {
7172 				active = sc->sc_mii.mii_media_active;
7173 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7174 				switch (IFM_SUBTYPE(active)) {
7175 				case IFM_10_T:
7176 					sc->sc_ctrl |= CTRL_SPEED_10;
7177 					break;
7178 				case IFM_100_TX:
7179 					sc->sc_ctrl |= CTRL_SPEED_100;
7180 					break;
7181 				case IFM_1000_T:
7182 					sc->sc_ctrl |= CTRL_SPEED_1000;
7183 					break;
7184 				default:
7185 					/*
7186 					 * fiber?
7187 					 * Shoud not enter here.
7188 					 */
7189 					printf("unknown media (%x)\n", active);
7190 					break;
7191 				}
7192 				if (active & IFM_FDX)
7193 					sc->sc_ctrl |= CTRL_FD;
7194 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7195 			}
7196 		} else if ((sc->sc_type == WM_T_ICH8)
7197 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7198 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7199 		} else if (sc->sc_type == WM_T_PCH) {
7200 			wm_k1_gig_workaround_hv(sc,
7201 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7202 		}
7203 
7204 		if ((sc->sc_phytype == WMPHY_82578)
7205 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7206 			== IFM_1000_T)) {
7207 
7208 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7209 				delay(200*1000); /* XXX too big */
7210 
7211 				/* Link stall fix for link up */
7212 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7213 				    HV_MUX_DATA_CTRL,
7214 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7215 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7216 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7217 				    HV_MUX_DATA_CTRL,
7218 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7219 			}
7220 		}
7221 	} else if (icr & ICR_RXSEQ) {
7222 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7223 			device_xname(sc->sc_dev)));
7224 	}
7225 }
7226 
7227 /*
7228  * wm_linkintr_tbi:
7229  *
7230  *	Helper; handle link interrupts for TBI mode.
7231  */
7232 static void
7233 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7234 {
7235 	uint32_t status;
7236 
7237 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7238 		__func__));
7239 
7240 	status = CSR_READ(sc, WMREG_STATUS);
7241 	if (icr & ICR_LSC) {
7242 		if (status & STATUS_LU) {
7243 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7244 			    device_xname(sc->sc_dev),
7245 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7246 			/*
7247 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7248 			 * so we should update sc->sc_ctrl
7249 			 */
7250 
7251 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7252 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7253 			sc->sc_fcrtl &= ~FCRTL_XONE;
7254 			if (status & STATUS_FD)
7255 				sc->sc_tctl |=
7256 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7257 			else
7258 				sc->sc_tctl |=
7259 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7260 			if (sc->sc_ctrl & CTRL_TFCE)
7261 				sc->sc_fcrtl |= FCRTL_XONE;
7262 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7263 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7264 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7265 				      sc->sc_fcrtl);
7266 			sc->sc_tbi_linkup = 1;
7267 		} else {
7268 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7269 			    device_xname(sc->sc_dev)));
7270 			sc->sc_tbi_linkup = 0;
7271 		}
7272 		/* Update LED */
7273 		wm_tbi_serdes_set_linkled(sc);
7274 	} else if (icr & ICR_RXSEQ) {
7275 		DPRINTF(WM_DEBUG_LINK,
7276 		    ("%s: LINK: Receive sequence error\n",
7277 		    device_xname(sc->sc_dev)));
7278 	}
7279 }
7280 
7281 /*
7282  * wm_linkintr_serdes:
7283  *
 *	Helper; handle link interrupts for SERDES mode.
7285  */
7286 static void
7287 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7288 {
7289 	struct mii_data *mii = &sc->sc_mii;
7290 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7291 	uint32_t pcs_adv, pcs_lpab, reg;
7292 
7293 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7294 		__func__));
7295 
7296 	if (icr & ICR_LSC) {
7297 		/* Check PCS */
7298 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7299 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7300 			mii->mii_media_status |= IFM_ACTIVE;
7301 			sc->sc_tbi_linkup = 1;
7302 		} else {
			mii->mii_media_active |= IFM_NONE;
7304 			sc->sc_tbi_linkup = 0;
7305 			wm_tbi_serdes_set_linkled(sc);
7306 			return;
7307 		}
7308 		mii->mii_media_active |= IFM_1000_SX;
7309 		if ((reg & PCS_LSTS_FDX) != 0)
7310 			mii->mii_media_active |= IFM_FDX;
7311 		else
7312 			mii->mii_media_active |= IFM_HDX;
7313 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7314 			/* Check flow */
7315 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7316 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7317 				DPRINTF(WM_DEBUG_LINK,
7318 				    ("XXX LINKOK but not ACOMP\n"));
7319 				return;
7320 			}
7321 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7322 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7323 			DPRINTF(WM_DEBUG_LINK,
7324 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7325 			if ((pcs_adv & TXCW_SYM_PAUSE)
7326 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7327 				mii->mii_media_active |= IFM_FLOW
7328 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7329 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7330 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7331 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7332 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7333 				mii->mii_media_active |= IFM_FLOW
7334 				    | IFM_ETH_TXPAUSE;
7335 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7336 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7337 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7338 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7339 				mii->mii_media_active |= IFM_FLOW
7340 				    | IFM_ETH_RXPAUSE;
7341 		}
7342 		/* Update LED */
7343 		wm_tbi_serdes_set_linkled(sc);
7344 	} else {
7345 		DPRINTF(WM_DEBUG_LINK,
7346 		    ("%s: LINK: Receive sequence error\n",
7347 		    device_xname(sc->sc_dev)));
7348 	}
7349 }
7350 
7351 /*
7352  * wm_linkintr:
7353  *
7354  *	Helper; handle link interrupts.
7355  */
7356 static void
7357 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7358 {
7359 
7360 	KASSERT(WM_CORE_LOCKED(sc));
7361 
7362 	if (sc->sc_flags & WM_F_HAS_MII)
7363 		wm_linkintr_gmii(sc, icr);
7364 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7365 	    && (sc->sc_type >= WM_T_82575))
7366 		wm_linkintr_serdes(sc, icr);
7367 	else
7368 		wm_linkintr_tbi(sc, icr);
7369 }
7370 
7371 /*
7372  * wm_intr_legacy:
7373  *
7374  *	Interrupt service routine for INTx and MSI.
7375  */
7376 static int
7377 wm_intr_legacy(void *arg)
7378 {
7379 	struct wm_softc *sc = arg;
7380 	struct wm_txqueue *txq = &sc->sc_txq[0];
7381 	struct wm_rxqueue *rxq = &sc->sc_rxq[0];
7382 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7383 	uint32_t icr, rndval = 0;
7384 	int handled = 0;
7385 
7386 	DPRINTF(WM_DEBUG_TX,
7387 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7388 	while (1 /* CONSTCOND */) {
7389 		icr = CSR_READ(sc, WMREG_ICR);
7390 		if ((icr & sc->sc_icr) == 0)
7391 			break;
7392 		if (rndval == 0)
7393 			rndval = icr;
7394 
7395 		WM_RX_LOCK(rxq);
7396 
7397 		if (sc->sc_stopping) {
7398 			WM_RX_UNLOCK(rxq);
7399 			break;
7400 		}
7401 
7402 		handled = 1;
7403 
7404 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7405 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7406 			DPRINTF(WM_DEBUG_RX,
7407 			    ("%s: RX: got Rx intr 0x%08x\n",
7408 			    device_xname(sc->sc_dev),
7409 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7410 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7411 		}
7412 #endif
7413 		wm_rxeof(rxq);
7414 
7415 		WM_RX_UNLOCK(rxq);
7416 		WM_TX_LOCK(txq);
7417 
7418 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7419 		if (icr & ICR_TXDW) {
7420 			DPRINTF(WM_DEBUG_TX,
7421 			    ("%s: TX: got TXDW interrupt\n",
7422 			    device_xname(sc->sc_dev)));
7423 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
7424 		}
7425 #endif
7426 		wm_txeof(sc);
7427 
7428 		WM_TX_UNLOCK(txq);
7429 		WM_CORE_LOCK(sc);
7430 
7431 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7432 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7433 			wm_linkintr(sc, icr);
7434 		}
7435 
7436 		WM_CORE_UNLOCK(sc);
7437 
7438 		if (icr & ICR_RXO) {
7439 #if defined(WM_DEBUG)
7440 			log(LOG_WARNING, "%s: Receive overrun\n",
7441 			    device_xname(sc->sc_dev));
7442 #endif /* defined(WM_DEBUG) */
7443 		}
7444 	}
7445 
7446 	rnd_add_uint32(&sc->rnd_source, rndval);
7447 
7448 	if (handled) {
7449 		/* Try to get more packets going. */
7450 		ifp->if_start(ifp);
7451 	}
7452 
7453 	return handled;
7454 }
7455 
7456 /*
7457  * wm_txintr_msix:
7458  *
7459  *	Interrupt service routine for TX complete interrupt for MSI-X.
7460  */
7461 static int
7462 wm_txintr_msix(void *arg)
7463 {
7464 	struct wm_txqueue *txq = arg;
7465 	struct wm_softc *sc = txq->txq_sc;
7466 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7467 	int handled = 0;
7468 
7469 	DPRINTF(WM_DEBUG_TX,
7470 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7471 
7472 	if (sc->sc_type == WM_T_82574)
7473 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(txq->txq_id));
7474 	else if (sc->sc_type == WM_T_82575)
7475 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(txq->txq_id));
7476 	else
7477 		CSR_WRITE(sc, WMREG_EIMC, 1 << txq->txq_intr_idx);
7478 
7479 	WM_TX_LOCK(txq);
7480 
7481 	if (sc->sc_stopping)
7482 		goto out;
7483 
7484 	WM_EVCNT_INCR(&sc->sc_ev_txdw);
7485 	handled = wm_txeof(sc);
7486 
7487 out:
7488 	WM_TX_UNLOCK(txq);
7489 
7490 	if (sc->sc_type == WM_T_82574)
7491 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(txq->txq_id));
7492 	else if (sc->sc_type == WM_T_82575)
7493 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(txq->txq_id));
7494 	else
7495 		CSR_WRITE(sc, WMREG_EIMS, 1 << txq->txq_intr_idx);
7496 
7497 	if (handled) {
7498 		/* Try to get more packets going. */
7499 		ifp->if_start(ifp);
7500 	}
7501 
7502 	return handled;
7503 }
7504 
7505 /*
7506  * wm_rxintr_msix:
7507  *
7508  *	Interrupt service routine for RX interrupt for MSI-X.
7509  */
7510 static int
7511 wm_rxintr_msix(void *arg)
7512 {
7513 	struct wm_rxqueue *rxq = arg;
7514 	struct wm_softc *sc = rxq->rxq_sc;
7515 
7516 	DPRINTF(WM_DEBUG_RX,
7517 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7518 
7519 	if (sc->sc_type == WM_T_82574)
7520 		CSR_WRITE(sc, WMREG_IMC, ICR_RXQ(rxq->rxq_id));
7521 	else if (sc->sc_type == WM_T_82575)
7522 		CSR_WRITE(sc, WMREG_EIMC, EITR_RX_QUEUE(rxq->rxq_id));
7523 	else
7524 		CSR_WRITE(sc, WMREG_EIMC, 1 << rxq->rxq_intr_idx);
7525 
7526 	WM_RX_LOCK(rxq);
7527 
7528 	if (sc->sc_stopping)
7529 		goto out;
7530 
7531 	WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7532 	wm_rxeof(rxq);
7533 
7534 out:
7535 	WM_RX_UNLOCK(rxq);
7536 
7537 	if (sc->sc_type == WM_T_82574)
7538 		CSR_WRITE(sc, WMREG_IMS, ICR_RXQ(rxq->rxq_id));
7539 	else if (sc->sc_type == WM_T_82575)
7540 		CSR_WRITE(sc, WMREG_EIMS, EITR_RX_QUEUE(rxq->rxq_id));
7541 	else
7542 		CSR_WRITE(sc, WMREG_EIMS, 1 << rxq->rxq_intr_idx);
7543 
7544 	return 1;
7545 }
7546 
7547 /*
7548  * wm_linkintr_msix:
7549  *
7550  *	Interrupt service routine for link status change for MSI-X.
7551  */
7552 static int
7553 wm_linkintr_msix(void *arg)
7554 {
7555 	struct wm_softc *sc = arg;
7556 	uint32_t reg;
7557 
7558 	DPRINTF(WM_DEBUG_LINK,
7559 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7560 
7561 	reg = CSR_READ(sc, WMREG_ICR);
7562 	WM_CORE_LOCK(sc);
7563 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7564 		goto out;
7565 
7566 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7567 	wm_linkintr(sc, ICR_LSC);
7568 
7569 out:
7570 	WM_CORE_UNLOCK(sc);
7571 
7572 	if (sc->sc_type == WM_T_82574)
7573 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7574 	else if (sc->sc_type == WM_T_82575)
7575 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7576 	else
7577 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7578 
7579 	return 1;
7580 }
7581 
7582 /*
7583  * Media related.
7584  * GMII, SGMII, TBI (and SERDES)
7585  */
7586 
7587 /* Common */
7588 
7589 /*
7590  * wm_tbi_serdes_set_linkled:
7591  *
7592  *	Update the link LED on TBI and SERDES devices.
7593  */
7594 static void
7595 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7596 {
7597 
7598 	if (sc->sc_tbi_linkup)
7599 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7600 	else
7601 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7602 
7603 	/* 82540 or newer devices are active low */
7604 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7605 
7606 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7607 }
7608 
7609 /* GMII related */
7610 
7611 /*
7612  * wm_gmii_reset:
7613  *
7614  *	Reset the PHY.
7615  */
7616 static void
7617 wm_gmii_reset(struct wm_softc *sc)
7618 {
7619 	uint32_t reg;
7620 	int rv;
7621 
7622 	/* get phy semaphore */
7623 	switch (sc->sc_type) {
7624 	case WM_T_82571:
7625 	case WM_T_82572:
7626 	case WM_T_82573:
7627 	case WM_T_82574:
7628 	case WM_T_82583:
7629 		 /* XXX should get sw semaphore, too */
7630 		rv = wm_get_swsm_semaphore(sc);
7631 		break;
7632 	case WM_T_82575:
7633 	case WM_T_82576:
7634 	case WM_T_82580:
7635 	case WM_T_I350:
7636 	case WM_T_I354:
7637 	case WM_T_I210:
7638 	case WM_T_I211:
7639 	case WM_T_80003:
7640 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7641 		break;
7642 	case WM_T_ICH8:
7643 	case WM_T_ICH9:
7644 	case WM_T_ICH10:
7645 	case WM_T_PCH:
7646 	case WM_T_PCH2:
7647 	case WM_T_PCH_LPT:
7648 		rv = wm_get_swfwhw_semaphore(sc);
7649 		break;
7650 	default:
		/* Nothing to do */
7652 		rv = 0;
7653 		break;
7654 	}
7655 	if (rv != 0) {
7656 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7657 		    __func__);
7658 		return;
7659 	}
7660 
7661 	switch (sc->sc_type) {
7662 	case WM_T_82542_2_0:
7663 	case WM_T_82542_2_1:
7664 		/* null */
7665 		break;
7666 	case WM_T_82543:
7667 		/*
7668 		 * With 82543, we need to force speed and duplex on the MAC
7669 		 * equal to what the PHY speed and duplex configuration is.
7670 		 * In addition, we need to perform a hardware reset on the PHY
7671 		 * to take it out of reset.
7672 		 */
7673 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7674 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7675 
7676 		/* The PHY reset pin is active-low. */
7677 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7678 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7679 		    CTRL_EXT_SWDPIN(4));
7680 		reg |= CTRL_EXT_SWDPIO(4);
7681 
7682 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7683 		CSR_WRITE_FLUSH(sc);
7684 		delay(10*1000);
7685 
7686 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7687 		CSR_WRITE_FLUSH(sc);
7688 		delay(150);
7689 #if 0
7690 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7691 #endif
7692 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
7693 		break;
7694 	case WM_T_82544:	/* reset 10000us */
7695 	case WM_T_82540:
7696 	case WM_T_82545:
7697 	case WM_T_82545_3:
7698 	case WM_T_82546:
7699 	case WM_T_82546_3:
7700 	case WM_T_82541:
7701 	case WM_T_82541_2:
7702 	case WM_T_82547:
7703 	case WM_T_82547_2:
7704 	case WM_T_82571:	/* reset 100us */
7705 	case WM_T_82572:
7706 	case WM_T_82573:
7707 	case WM_T_82574:
7708 	case WM_T_82575:
7709 	case WM_T_82576:
7710 	case WM_T_82580:
7711 	case WM_T_I350:
7712 	case WM_T_I354:
7713 	case WM_T_I210:
7714 	case WM_T_I211:
7715 	case WM_T_82583:
7716 	case WM_T_80003:
7717 		/* generic reset */
7718 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7719 		CSR_WRITE_FLUSH(sc);
7720 		delay(20000);
7721 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7722 		CSR_WRITE_FLUSH(sc);
7723 		delay(20000);
7724 
7725 		if ((sc->sc_type == WM_T_82541)
7726 		    || (sc->sc_type == WM_T_82541_2)
7727 		    || (sc->sc_type == WM_T_82547)
7728 		    || (sc->sc_type == WM_T_82547_2)) {
			/* Workarounds for IGP are done in igp_reset() */
7730 			/* XXX add code to set LED after phy reset */
7731 		}
7732 		break;
7733 	case WM_T_ICH8:
7734 	case WM_T_ICH9:
7735 	case WM_T_ICH10:
7736 	case WM_T_PCH:
7737 	case WM_T_PCH2:
7738 	case WM_T_PCH_LPT:
7739 		/* generic reset */
7740 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7741 		CSR_WRITE_FLUSH(sc);
7742 		delay(100);
7743 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7744 		CSR_WRITE_FLUSH(sc);
7745 		delay(150);
7746 		break;
7747 	default:
7748 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7749 		    __func__);
7750 		break;
7751 	}
7752 
7753 	/* release PHY semaphore */
7754 	switch (sc->sc_type) {
7755 	case WM_T_82571:
7756 	case WM_T_82572:
7757 	case WM_T_82573:
7758 	case WM_T_82574:
7759 	case WM_T_82583:
7760 		 /* XXX should put sw semaphore, too */
7761 		wm_put_swsm_semaphore(sc);
7762 		break;
7763 	case WM_T_82575:
7764 	case WM_T_82576:
7765 	case WM_T_82580:
7766 	case WM_T_I350:
7767 	case WM_T_I354:
7768 	case WM_T_I210:
7769 	case WM_T_I211:
7770 	case WM_T_80003:
7771 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7772 		break;
7773 	case WM_T_ICH8:
7774 	case WM_T_ICH9:
7775 	case WM_T_ICH10:
7776 	case WM_T_PCH:
7777 	case WM_T_PCH2:
7778 	case WM_T_PCH_LPT:
7779 		wm_put_swfwhw_semaphore(sc);
7780 		break;
7781 	default:
		/* Nothing to do */
7784 		break;
7785 	}
7786 
7787 	/* get_cfg_done */
7788 	wm_get_cfg_done(sc);
7789 
7790 	/* extra setup */
7791 	switch (sc->sc_type) {
7792 	case WM_T_82542_2_0:
7793 	case WM_T_82542_2_1:
7794 	case WM_T_82543:
7795 	case WM_T_82544:
7796 	case WM_T_82540:
7797 	case WM_T_82545:
7798 	case WM_T_82545_3:
7799 	case WM_T_82546:
7800 	case WM_T_82546_3:
7801 	case WM_T_82541_2:
7802 	case WM_T_82547_2:
7803 	case WM_T_82571:
7804 	case WM_T_82572:
7805 	case WM_T_82573:
7806 	case WM_T_82575:
7807 	case WM_T_82576:
7808 	case WM_T_82580:
7809 	case WM_T_I350:
7810 	case WM_T_I354:
7811 	case WM_T_I210:
7812 	case WM_T_I211:
7813 	case WM_T_80003:
7814 		/* null */
7815 		break;
7816 	case WM_T_82574:
7817 	case WM_T_82583:
7818 		wm_lplu_d0_disable(sc);
7819 		break;
7820 	case WM_T_82541:
7821 	case WM_T_82547:
		/* XXX Configure the LED after PHY reset */
7823 		break;
7824 	case WM_T_ICH8:
7825 	case WM_T_ICH9:
7826 	case WM_T_ICH10:
7827 	case WM_T_PCH:
7828 	case WM_T_PCH2:
7829 	case WM_T_PCH_LPT:
		/* Allow time for h/w to get to a quiescent state after reset */
7831 		delay(10*1000);
7832 
7833 		if (sc->sc_type == WM_T_PCH)
7834 			wm_hv_phy_workaround_ich8lan(sc);
7835 
7836 		if (sc->sc_type == WM_T_PCH2)
7837 			wm_lv_phy_workaround_ich8lan(sc);
7838 
7839 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7840 			/*
7841 			 * dummy read to clear the phy wakeup bit after lcd
7842 			 * reset
7843 			 */
7844 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7845 		}
7846 
7847 		/*
		 * XXX Configure the LCD with the extended configuration region
7849 		 * in NVM
7850 		 */
7851 
7852 		/* Disable D0 LPLU. */
7853 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
7854 			wm_lplu_d0_disable_pch(sc);
7855 		else
7856 			wm_lplu_d0_disable(sc);	/* ICH* */
7857 		break;
7858 	default:
7859 		panic("%s: unknown type\n", __func__);
7860 		break;
7861 	}
7862 }
7863 
7864 /*
7865  * wm_get_phy_id_82575:
7866  *
 *	Return the PHY ID, or -1 on failure.
7868  */
7869 static int
7870 wm_get_phy_id_82575(struct wm_softc *sc)
7871 {
7872 	uint32_t reg;
7873 	int phyid = -1;
7874 
7875 	/* XXX */
7876 	if ((sc->sc_flags & WM_F_SGMII) == 0)
7877 		return -1;
7878 
7879 	if (wm_sgmii_uses_mdio(sc)) {
7880 		switch (sc->sc_type) {
7881 		case WM_T_82575:
7882 		case WM_T_82576:
7883 			reg = CSR_READ(sc, WMREG_MDIC);
7884 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7885 			break;
7886 		case WM_T_82580:
7887 		case WM_T_I350:
7888 		case WM_T_I354:
7889 		case WM_T_I210:
7890 		case WM_T_I211:
7891 			reg = CSR_READ(sc, WMREG_MDICNFG);
7892 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
7893 			break;
7894 		default:
7895 			return -1;
7896 		}
7897 	}
7898 
7899 	return phyid;
7900 }
7901 
7902 
7903 /*
7904  * wm_gmii_mediainit:
7905  *
7906  *	Initialize media for use on 1000BASE-T devices.
7907  */
7908 static void
7909 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
7910 {
7911 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7912 	struct mii_data *mii = &sc->sc_mii;
7913 	uint32_t reg;
7914 
7915 	/* We have GMII. */
7916 	sc->sc_flags |= WM_F_HAS_MII;
7917 
7918 	if (sc->sc_type == WM_T_80003)
		sc->sc_tipg = TIPG_1000T_80003_DFLT;
7920 	else
7921 		sc->sc_tipg = TIPG_1000T_DFLT;
7922 
7923 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
7924 	if ((sc->sc_type == WM_T_82580)
7925 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
7926 	    || (sc->sc_type == WM_T_I211)) {
7927 		reg = CSR_READ(sc, WMREG_PHPM);
7928 		reg &= ~PHPM_GO_LINK_D;
7929 		CSR_WRITE(sc, WMREG_PHPM, reg);
7930 	}
7931 
7932 	/*
7933 	 * Let the chip set speed/duplex on its own based on
7934 	 * signals from the PHY.
7935 	 * XXXbouyer - I'm not sure this is right for the 80003,
7936 	 * the em driver only sets CTRL_SLU here - but it seems to work.
7937 	 */
7938 	sc->sc_ctrl |= CTRL_SLU;
7939 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7940 
7941 	/* Initialize our media structures and probe the GMII. */
7942 	mii->mii_ifp = ifp;
7943 
7944 	/*
7945 	 * Determine the PHY access method.
7946 	 *
7947 	 *  For SGMII, use SGMII specific method.
7948 	 *
7949 	 *  For some devices, we can determine the PHY access method
7950 	 * from sc_type.
7951 	 *
	 *  For ICH and PCH variants, it's difficult to determine the PHY
	 * access method from sc_type alone, so use the PCI product ID for
	 * some devices.
	 *  For other ICH8 variants, try igp's method first; if no PHY is
	 * detected that way, fall back to bm's method.
7957 	 */
7958 	switch (prodid) {
7959 	case PCI_PRODUCT_INTEL_PCH_M_LM:
7960 	case PCI_PRODUCT_INTEL_PCH_M_LC:
7961 		/* 82577 */
7962 		sc->sc_phytype = WMPHY_82577;
7963 		break;
7964 	case PCI_PRODUCT_INTEL_PCH_D_DM:
7965 	case PCI_PRODUCT_INTEL_PCH_D_DC:
7966 		/* 82578 */
7967 		sc->sc_phytype = WMPHY_82578;
7968 		break;
7969 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
7970 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
7971 		/* 82579 */
7972 		sc->sc_phytype = WMPHY_82579;
7973 		break;
7974 	case PCI_PRODUCT_INTEL_82801I_BM:
7975 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
7976 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
7977 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
7978 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
7979 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
7980 		/* 82567 */
7981 		sc->sc_phytype = WMPHY_BM;
7982 		mii->mii_readreg = wm_gmii_bm_readreg;
7983 		mii->mii_writereg = wm_gmii_bm_writereg;
7984 		break;
7985 	default:
7986 		if (((sc->sc_flags & WM_F_SGMII) != 0)
7987 		    && !wm_sgmii_uses_mdio(sc)){
7988 			/* SGMII */
7989 			mii->mii_readreg = wm_sgmii_readreg;
7990 			mii->mii_writereg = wm_sgmii_writereg;
7991 		} else if (sc->sc_type >= WM_T_80003) {
7992 			/* 80003 */
7993 			mii->mii_readreg = wm_gmii_i80003_readreg;
7994 			mii->mii_writereg = wm_gmii_i80003_writereg;
7995 		} else if (sc->sc_type >= WM_T_I210) {
7996 			/* I210 and I211 */
7997 			mii->mii_readreg = wm_gmii_gs40g_readreg;
7998 			mii->mii_writereg = wm_gmii_gs40g_writereg;
7999 		} else if (sc->sc_type >= WM_T_82580) {
8000 			/* 82580, I350 and I354 */
8001 			sc->sc_phytype = WMPHY_82580;
8002 			mii->mii_readreg = wm_gmii_82580_readreg;
8003 			mii->mii_writereg = wm_gmii_82580_writereg;
8004 		} else if (sc->sc_type >= WM_T_82544) {
8005 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
8006 			mii->mii_readreg = wm_gmii_i82544_readreg;
8007 			mii->mii_writereg = wm_gmii_i82544_writereg;
8008 		} else {
8009 			mii->mii_readreg = wm_gmii_i82543_readreg;
8010 			mii->mii_writereg = wm_gmii_i82543_writereg;
8011 		}
8012 		break;
8013 	}
8014 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_LPT)) {
8015 		/* All PCH* use _hv_ */
8016 		mii->mii_readreg = wm_gmii_hv_readreg;
8017 		mii->mii_writereg = wm_gmii_hv_writereg;
8018 	}
8019 	mii->mii_statchg = wm_gmii_statchg;
8020 
8021 	wm_gmii_reset(sc);
8022 
8023 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8024 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8025 	    wm_gmii_mediastatus);
8026 
8027 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8028 	    || (sc->sc_type == WM_T_82580)
8029 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8030 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8031 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8032 			/* Attach only one port */
8033 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8034 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8035 		} else {
8036 			int i, id;
8037 			uint32_t ctrl_ext;
8038 
8039 			id = wm_get_phy_id_82575(sc);
8040 			if (id != -1) {
8041 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8042 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8043 			}
8044 			if ((id == -1)
8045 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
				/* Power on the SGMII PHY if it is disabled */
8047 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8048 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8049 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8050 				CSR_WRITE_FLUSH(sc);
8051 				delay(300*1000); /* XXX too long */
8052 
				/* Try PHY addresses from 1 to 7 */
8054 				for (i = 1; i < 8; i++)
8055 					mii_attach(sc->sc_dev, &sc->sc_mii,
8056 					    0xffffffff, i, MII_OFFSET_ANY,
8057 					    MIIF_DOPAUSE);
8058 
				/* Restore the previous SFP cage power state */
8060 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8061 			}
8062 		}
8063 	} else {
8064 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8065 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8066 	}
8067 
8068 	/*
8069 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8070 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8071 	 */
8072 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8073 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8074 		wm_set_mdio_slow_mode_hv(sc);
8075 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8076 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8077 	}
8078 
8079 	/*
8080 	 * (For ICH8 variants)
8081 	 * If PHY detection failed, use BM's r/w function and retry.
8082 	 */
8083 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8084 		/* if failed, retry with *_bm_* */
8085 		mii->mii_readreg = wm_gmii_bm_readreg;
8086 		mii->mii_writereg = wm_gmii_bm_writereg;
8087 
8088 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8089 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8090 	}
8091 
8092 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		/* No PHY was found */
8094 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8095 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8096 		sc->sc_phytype = WMPHY_NONE;
8097 	} else {
8098 		/*
8099 		 * PHY Found!
8100 		 * Check PHY type.
8101 		 */
8102 		uint32_t model;
8103 		struct mii_softc *child;
8104 
8105 		child = LIST_FIRST(&mii->mii_phys);
8106 		model = child->mii_mpd_model;
8107 		if (model == MII_MODEL_yyINTEL_I82566)
8108 			sc->sc_phytype = WMPHY_IGP_3;
8109 
8110 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8111 	}
8112 }
8113 
8114 /*
8115  * wm_gmii_mediachange:	[ifmedia interface function]
8116  *
8117  *	Set hardware to newly-selected media on a 1000BASE-T device.
8118  */
8119 static int
8120 wm_gmii_mediachange(struct ifnet *ifp)
8121 {
8122 	struct wm_softc *sc = ifp->if_softc;
8123 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8124 	int rc;
8125 
8126 	if ((ifp->if_flags & IFF_UP) == 0)
8127 		return 0;
8128 
8129 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8130 	sc->sc_ctrl |= CTRL_SLU;
8131 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8132 	    || (sc->sc_type > WM_T_82543)) {
8133 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8134 	} else {
8135 		sc->sc_ctrl &= ~CTRL_ASDE;
8136 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8137 		if (ife->ifm_media & IFM_FDX)
8138 			sc->sc_ctrl |= CTRL_FD;
8139 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8140 		case IFM_10_T:
8141 			sc->sc_ctrl |= CTRL_SPEED_10;
8142 			break;
8143 		case IFM_100_TX:
8144 			sc->sc_ctrl |= CTRL_SPEED_100;
8145 			break;
8146 		case IFM_1000_T:
8147 			sc->sc_ctrl |= CTRL_SPEED_1000;
8148 			break;
8149 		default:
8150 			panic("wm_gmii_mediachange: bad media 0x%x",
8151 			    ife->ifm_media);
8152 		}
8153 	}
8154 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8155 	if (sc->sc_type <= WM_T_82543)
8156 		wm_gmii_reset(sc);
8157 
8158 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8159 		return 0;
8160 	return rc;
8161 }
8162 
8163 /*
8164  * wm_gmii_mediastatus:	[ifmedia interface function]
8165  *
8166  *	Get the current interface media status on a 1000BASE-T device.
8167  */
8168 static void
8169 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8170 {
8171 	struct wm_softc *sc = ifp->if_softc;
8172 
8173 	ether_mediastatus(ifp, ifmr);
8174 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8175 	    | sc->sc_flowflags;
8176 }
8177 
8178 #define	MDI_IO		CTRL_SWDPIN(2)
8179 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8180 #define	MDI_CLK		CTRL_SWDPIN(3)
8181 
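/*
 * The 82543 has no MDIC register; MII management frames are bit-banged
 * through the software-defined pins above.  Each bit sent takes three
 * CTRL writes (data setup, clock high, clock low), 10us apart, so a
 * full read transaction takes on the order of a millisecond.
 */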
8182 static void
8183 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8184 {
8185 	uint32_t i, v;
8186 
8187 	v = CSR_READ(sc, WMREG_CTRL);
8188 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8189 	v |= MDI_DIR | CTRL_SWDPIO(3);
8190 
8191 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8192 		if (data & i)
8193 			v |= MDI_IO;
8194 		else
8195 			v &= ~MDI_IO;
8196 		CSR_WRITE(sc, WMREG_CTRL, v);
8197 		CSR_WRITE_FLUSH(sc);
8198 		delay(10);
8199 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8200 		CSR_WRITE_FLUSH(sc);
8201 		delay(10);
8202 		CSR_WRITE(sc, WMREG_CTRL, v);
8203 		CSR_WRITE_FLUSH(sc);
8204 		delay(10);
8205 	}
8206 }
8207 
8208 static uint32_t
8209 wm_i82543_mii_recvbits(struct wm_softc *sc)
8210 {
8211 	uint32_t v, i, data = 0;
8212 
8213 	v = CSR_READ(sc, WMREG_CTRL);
8214 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8215 	v |= CTRL_SWDPIO(3);
8216 
8217 	CSR_WRITE(sc, WMREG_CTRL, v);
8218 	CSR_WRITE_FLUSH(sc);
8219 	delay(10);
8220 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8221 	CSR_WRITE_FLUSH(sc);
8222 	delay(10);
8223 	CSR_WRITE(sc, WMREG_CTRL, v);
8224 	CSR_WRITE_FLUSH(sc);
8225 	delay(10);
8226 
8227 	for (i = 0; i < 16; i++) {
8228 		data <<= 1;
8229 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8230 		CSR_WRITE_FLUSH(sc);
8231 		delay(10);
8232 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8233 			data |= 1;
8234 		CSR_WRITE(sc, WMREG_CTRL, v);
8235 		CSR_WRITE_FLUSH(sc);
8236 		delay(10);
8237 	}
8238 
8239 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8240 	CSR_WRITE_FLUSH(sc);
8241 	delay(10);
8242 	CSR_WRITE(sc, WMREG_CTRL, v);
8243 	CSR_WRITE_FLUSH(sc);
8244 	delay(10);
8245 
8246 	return data;
8247 }
8248 
8249 #undef MDI_IO
8250 #undef MDI_DIR
8251 #undef MDI_CLK
8252 
8253 /*
8254  * wm_gmii_i82543_readreg:	[mii interface function]
8255  *
8256  *	Read a PHY register on the GMII (i82543 version).
8257  */
8258 static int
8259 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8260 {
8261 	struct wm_softc *sc = device_private(self);
8262 	int rv;
8263 
8264 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8265 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8266 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8267 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8268 
8269 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8270 	    device_xname(sc->sc_dev), phy, reg, rv));
8271 
8272 	return rv;
8273 }
8274 
8275 /*
8276  * wm_gmii_i82543_writereg:	[mii interface function]
8277  *
8278  *	Write a PHY register on the GMII (i82543 version).
8279  */
8280 static void
8281 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8282 {
8283 	struct wm_softc *sc = device_private(self);
8284 
8285 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8286 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8287 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8288 	    (MII_COMMAND_START << 30), 32);
8289 }
8290 
8291 /*
8292  * wm_gmii_i82544_readreg:	[mii interface function]
8293  *
8294  *	Read a PHY register on the GMII.
8295  */
8296 static int
8297 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8298 {
8299 	struct wm_softc *sc = device_private(self);
8300 	uint32_t mdic = 0;
8301 	int i, rv;
8302 
8303 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8304 	    MDIC_REGADD(reg));
8305 
8306 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8307 		mdic = CSR_READ(sc, WMREG_MDIC);
8308 		if (mdic & MDIC_READY)
8309 			break;
8310 		delay(50);
8311 	}
8312 
8313 	if ((mdic & MDIC_READY) == 0) {
8314 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8315 		    device_xname(sc->sc_dev), phy, reg);
8316 		rv = 0;
8317 	} else if (mdic & MDIC_E) {
8318 #if 0 /* This is normal if no PHY is present. */
8319 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8320 		    device_xname(sc->sc_dev), phy, reg);
8321 #endif
8322 		rv = 0;
8323 	} else {
8324 		rv = MDIC_DATA(mdic);
8325 		if (rv == 0xffff)
8326 			rv = 0;
8327 	}
8328 
8329 	return rv;
8330 }
8331 
8332 /*
8333  * wm_gmii_i82544_writereg:	[mii interface function]
8334  *
8335  *	Write a PHY register on the GMII.
8336  */
8337 static void
8338 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8339 {
8340 	struct wm_softc *sc = device_private(self);
8341 	uint32_t mdic = 0;
8342 	int i;
8343 
8344 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8345 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8346 
8347 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8348 		mdic = CSR_READ(sc, WMREG_MDIC);
8349 		if (mdic & MDIC_READY)
8350 			break;
8351 		delay(50);
8352 	}
8353 
8354 	if ((mdic & MDIC_READY) == 0)
8355 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8356 		    device_xname(sc->sc_dev), phy, reg);
8357 	else if (mdic & MDIC_E)
8358 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8359 		    device_xname(sc->sc_dev), phy, reg);
8360 }
8361 
8362 /*
8363  * wm_gmii_i80003_readreg:	[mii interface function]
8364  *
 *	Read a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8368  */
8369 static int
8370 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8371 {
8372 	struct wm_softc *sc = device_private(self);
8373 	int sem;
8374 	int rv;
8375 
8376 	if (phy != 1) /* only one PHY on kumeran bus */
8377 		return 0;
8378 
8379 	sem = swfwphysem[sc->sc_funcid];
8380 	if (wm_get_swfw_semaphore(sc, sem)) {
8381 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8382 		    __func__);
8383 		return 0;
8384 	}
8385 
8386 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8387 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8388 		    reg >> GG82563_PAGE_SHIFT);
8389 	} else {
8390 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8391 		    reg >> GG82563_PAGE_SHIFT);
8392 	}
	/* Wait another 200us; works around a bug in the MDIC ready bit */
8394 	delay(200);
8395 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8396 	delay(200);
8397 
8398 	wm_put_swfw_semaphore(sc, sem);
8399 	return rv;
8400 }
8401 
8402 /*
8403  * wm_gmii_i80003_writereg:	[mii interface function]
8404  *
 *	Write a PHY register on the Kumeran bus.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8408  */
8409 static void
8410 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8411 {
8412 	struct wm_softc *sc = device_private(self);
8413 	int sem;
8414 
8415 	if (phy != 1) /* only one PHY on kumeran bus */
8416 		return;
8417 
8418 	sem = swfwphysem[sc->sc_funcid];
8419 	if (wm_get_swfw_semaphore(sc, sem)) {
8420 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8421 		    __func__);
8422 		return;
8423 	}
8424 
8425 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8426 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8427 		    reg >> GG82563_PAGE_SHIFT);
8428 	} else {
8429 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8430 		    reg >> GG82563_PAGE_SHIFT);
8431 	}
	/* Wait another 200us; works around a bug in the MDIC ready bit */
8433 	delay(200);
8434 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8435 	delay(200);
8436 
8437 	wm_put_swfw_semaphore(sc, sem);
8438 }
8439 
8440 /*
8441  * wm_gmii_bm_readreg:	[mii interface function]
8442  *
 *	Read a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8446  */
8447 static int
8448 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8449 {
8450 	struct wm_softc *sc = device_private(self);
8451 	int sem;
8452 	int rv;
8453 
8454 	sem = swfwphysem[sc->sc_funcid];
8455 	if (wm_get_swfw_semaphore(sc, sem)) {
8456 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8457 		    __func__);
8458 		return 0;
8459 	}
8460 
8461 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8462 		if (phy == 1)
8463 			wm_gmii_i82544_writereg(self, phy,
8464 			    MII_IGPHY_PAGE_SELECT, reg);
8465 		else
8466 			wm_gmii_i82544_writereg(self, phy,
8467 			    GG82563_PHY_PAGE_SELECT,
8468 			    reg >> GG82563_PAGE_SHIFT);
8469 	}
8470 
8471 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8472 	wm_put_swfw_semaphore(sc, sem);
8473 	return rv;
8474 }
8475 
8476 /*
8477  * wm_gmii_bm_writereg:	[mii interface function]
8478  *
 *	Write a PHY register on a BM PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8482  */
8483 static void
8484 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8485 {
8486 	struct wm_softc *sc = device_private(self);
8487 	int sem;
8488 
8489 	sem = swfwphysem[sc->sc_funcid];
8490 	if (wm_get_swfw_semaphore(sc, sem)) {
8491 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8492 		    __func__);
8493 		return;
8494 	}
8495 
8496 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8497 		if (phy == 1)
8498 			wm_gmii_i82544_writereg(self, phy,
8499 			    MII_IGPHY_PAGE_SELECT, reg);
8500 		else
8501 			wm_gmii_i82544_writereg(self, phy,
8502 			    GG82563_PHY_PAGE_SELECT,
8503 			    reg >> GG82563_PAGE_SHIFT);
8504 	}
8505 
8506 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8507 	wm_put_swfw_semaphore(sc, sem);
8508 }
8509 
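/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write (rd selects which) a BM PHY wakeup register on
 * page 800.  Access requires a dance: enable wakeup-register access
 * on page 769 (BM_WUC_ENABLE_REG), select page 800, write the target
 * register number to the address opcode register, move the data
 * through the data opcode register, then restore the page-769 state.
 */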
8510 static void
8511 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8512 {
8513 	struct wm_softc *sc = device_private(self);
8514 	uint16_t regnum = BM_PHY_REG_NUM(offset);
8515 	uint16_t wuce;
8516 
8517 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
8518 	if (sc->sc_type == WM_T_PCH) {
		/* XXX The e1000 driver does nothing here... why? */
8520 	}
8521 
8522 	/* Set page 769 */
8523 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8524 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8525 
8526 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8527 
8528 	wuce &= ~BM_WUC_HOST_WU_BIT;
8529 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8530 	    wuce | BM_WUC_ENABLE_BIT);
8531 
8532 	/* Select page 800 */
8533 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8534 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8535 
8536 	/* Write page 800 */
8537 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8538 
8539 	if (rd)
8540 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8541 	else
8542 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8543 
8544 	/* Set page 769 */
8545 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8546 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8547 
8548 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8549 }
8550 
8551 /*
8552  * wm_gmii_hv_readreg:	[mii interface function]
8553  *
 *	Read a PHY register on an HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8557  */
8558 static int
8559 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8560 {
8561 	struct wm_softc *sc = device_private(self);
8562 	uint16_t page = BM_PHY_REG_PAGE(reg);
8563 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8564 	uint16_t val;
8565 	int rv;
8566 
8567 	if (wm_get_swfwhw_semaphore(sc)) {
8568 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8569 		    __func__);
8570 		return 0;
8571 	}
8572 
8573 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8574 	if (sc->sc_phytype == WMPHY_82577) {
8575 		/* XXX must write */
8576 	}
8577 
	/* Page 800 works differently than the rest so it has its own func */
	if (page == BM_WUC_PAGE) {
		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
		wm_put_swfwhw_semaphore(sc);
		return val;
	}

	/*
	 * Pages lower than 768 work differently than the rest so they
	 * would need their own func
	 */
	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
		printf("gmii_hv_readreg!!!\n");
		wm_put_swfwhw_semaphore(sc);
		return 0;
	}
8592 
8593 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8594 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8595 		    page << BME1000_PAGE_SHIFT);
8596 	}
8597 
8598 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8599 	wm_put_swfwhw_semaphore(sc);
8600 	return rv;
8601 }
8602 
8603 /*
8604  * wm_gmii_hv_writereg:	[mii interface function]
8605  *
 *	Write a PHY register on an HV (PCH) PHY.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8609  */
8610 static void
8611 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8612 {
8613 	struct wm_softc *sc = device_private(self);
8614 	uint16_t page = BM_PHY_REG_PAGE(reg);
8615 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8616 
8617 	if (wm_get_swfwhw_semaphore(sc)) {
8618 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8619 		    __func__);
8620 		return;
8621 	}
8622 
8623 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8624 
8625 	/* Page 800 works differently than the rest so it has its own func */
8626 	if (page == BM_WUC_PAGE) {
8627 		uint16_t tmp;
8628 
8629 		tmp = val;
		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
		wm_put_swfwhw_semaphore(sc);
		return;
	}
8633 
8634 	/*
8635 	 * Lower than page 768 works differently than the rest so it has its
8636 	 * own func
8637 	 */
8638 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8639 		printf("gmii_hv_writereg!!!\n");
8640 		return;
8641 	}
8642 
8643 	/*
8644 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
8645 	 * Power Down (whenever bit 11 of the PHY control register is set)
8646 	 */
8647 
8648 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8649 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8650 		    page << BME1000_PAGE_SHIFT);
8651 	}
8652 
8653 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8654 	wm_put_swfwhw_semaphore(sc);
8655 }
8656 
8657 /*
8658  * wm_gmii_82580_readreg:	[mii interface function]
8659  *
8660  *	Read a PHY register on the 82580 and I350.
8661  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8663  */
8664 static int
8665 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8666 {
8667 	struct wm_softc *sc = device_private(self);
8668 	int sem;
8669 	int rv;
8670 
8671 	sem = swfwphysem[sc->sc_funcid];
8672 	if (wm_get_swfw_semaphore(sc, sem)) {
8673 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8674 		    __func__);
8675 		return 0;
8676 	}
8677 
8678 	rv = wm_gmii_i82544_readreg(self, phy, reg);
8679 
8680 	wm_put_swfw_semaphore(sc, sem);
8681 	return rv;
8682 }
8683 
8684 /*
8685  * wm_gmii_82580_writereg:	[mii interface function]
8686  *
8687  *	Write a PHY register on the 82580 and I350.
8688  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8690  */
8691 static void
8692 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8693 {
8694 	struct wm_softc *sc = device_private(self);
8695 	int sem;
8696 
8697 	sem = swfwphysem[sc->sc_funcid];
8698 	if (wm_get_swfw_semaphore(sc, sem)) {
8699 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8700 		    __func__);
8701 		return;
8702 	}
8703 
8704 	wm_gmii_i82544_writereg(self, phy, reg, val);
8705 
8706 	wm_put_swfw_semaphore(sc, sem);
8707 }
8708 
8709 /*
8710  * wm_gmii_gs40g_readreg:	[mii interface function]
8711  *
 *	Read a PHY register on the I210 and I211.
 * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8715  */
8716 static int
8717 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8718 {
8719 	struct wm_softc *sc = device_private(self);
8720 	int sem;
8721 	int page, offset;
8722 	int rv;
8723 
8724 	/* Acquire semaphore */
8725 	sem = swfwphysem[sc->sc_funcid];
8726 	if (wm_get_swfw_semaphore(sc, sem)) {
8727 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8728 		    __func__);
8729 		return 0;
8730 	}
8731 
8732 	/* Page select */
8733 	page = reg >> GS40G_PAGE_SHIFT;
8734 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8735 
8736 	/* Read reg */
8737 	offset = reg & GS40G_OFFSET_MASK;
8738 	rv = wm_gmii_i82544_readreg(self, phy, offset);
8739 
8740 	wm_put_swfw_semaphore(sc, sem);
8741 	return rv;
8742 }
8743 
8744 /*
8745  * wm_gmii_gs40g_writereg:	[mii interface function]
8746  *
8747  *	Write a PHY register on the I210 and I211.
8748  * This could be handled by the PHY layer if we didn't have to lock the
 * resource ...
8750  */
8751 static void
8752 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8753 {
8754 	struct wm_softc *sc = device_private(self);
8755 	int sem;
8756 	int page, offset;
8757 
8758 	/* Acquire semaphore */
8759 	sem = swfwphysem[sc->sc_funcid];
8760 	if (wm_get_swfw_semaphore(sc, sem)) {
8761 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8762 		    __func__);
8763 		return;
8764 	}
8765 
8766 	/* Page select */
8767 	page = reg >> GS40G_PAGE_SHIFT;
8768 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8769 
8770 	/* Write reg */
8771 	offset = reg & GS40G_OFFSET_MASK;
8772 	wm_gmii_i82544_writereg(self, phy, offset, val);
8773 
8774 	/* Release semaphore */
8775 	wm_put_swfw_semaphore(sc, sem);
8776 }
8777 
8778 /*
8779  * wm_gmii_statchg:	[mii interface function]
8780  *
8781  *	Callback from MII layer when media changes.
8782  */
8783 static void
8784 wm_gmii_statchg(struct ifnet *ifp)
8785 {
8786 	struct wm_softc *sc = ifp->if_softc;
8787 	struct mii_data *mii = &sc->sc_mii;
8788 
8789 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8790 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8791 	sc->sc_fcrtl &= ~FCRTL_XONE;
8792 
8793 	/*
8794 	 * Get flow control negotiation result.
8795 	 */
8796 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8797 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8798 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8799 		mii->mii_media_active &= ~IFM_ETH_FMASK;
8800 	}
8801 
8802 	if (sc->sc_flowflags & IFM_FLOW) {
8803 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8804 			sc->sc_ctrl |= CTRL_TFCE;
8805 			sc->sc_fcrtl |= FCRTL_XONE;
8806 		}
8807 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8808 			sc->sc_ctrl |= CTRL_RFCE;
8809 	}
8810 
8811 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
8812 		DPRINTF(WM_DEBUG_LINK,
8813 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8814 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8815 	} else {
8816 		DPRINTF(WM_DEBUG_LINK,
8817 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8818 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8819 	}
8820 
8821 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8822 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8823 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8824 						 : WMREG_FCRTL, sc->sc_fcrtl);
8825 	if (sc->sc_type == WM_T_80003) {
8826 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8827 		case IFM_1000_T:
8828 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8829 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
			sc->sc_tipg = TIPG_1000T_80003_DFLT;
8831 			break;
8832 		default:
8833 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8834 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
			sc->sc_tipg = TIPG_10_100_80003_DFLT;
8836 			break;
8837 		}
8838 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8839 	}
8840 }
8841 
8842 /*
8843  * wm_kmrn_readreg:
8844  *
 *	Read a Kumeran register
8846  */
8847 static int
8848 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8849 {
8850 	int rv;
8851 
8852 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8853 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8854 			aprint_error_dev(sc->sc_dev,
8855 			    "%s: failed to get semaphore\n", __func__);
8856 			return 0;
8857 		}
8858 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8859 		if (wm_get_swfwhw_semaphore(sc)) {
8860 			aprint_error_dev(sc->sc_dev,
8861 			    "%s: failed to get semaphore\n", __func__);
8862 			return 0;
8863 		}
8864 	}
8865 
8866 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8867 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8868 	    KUMCTRLSTA_REN);
8869 	CSR_WRITE_FLUSH(sc);
8870 	delay(2);
8871 
8872 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8873 
8874 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8875 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8876 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8877 		wm_put_swfwhw_semaphore(sc);
8878 
8879 	return rv;
8880 }
8881 
8882 /*
8883  * wm_kmrn_writereg:
8884  *
 *	Write a Kumeran register
8886  */
8887 static void
8888 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8889 {
8890 
8891 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8892 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8893 			aprint_error_dev(sc->sc_dev,
8894 			    "%s: failed to get semaphore\n", __func__);
8895 			return;
8896 		}
8897 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8898 		if (wm_get_swfwhw_semaphore(sc)) {
8899 			aprint_error_dev(sc->sc_dev,
8900 			    "%s: failed to get semaphore\n", __func__);
8901 			return;
8902 		}
8903 	}
8904 
8905 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8906 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8907 	    (val & KUMCTRLSTA_MASK));
8908 
8909 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8910 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8911 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8912 		wm_put_swfwhw_semaphore(sc);
8913 }
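
/*
 * Illustrative sketch, not part of the original driver: the two
 * accessors above compose into the usual read-modify-write pattern;
 * each call takes and releases the semaphore on its own.  The helper
 * name and the bits argument are hypothetical.
 */
#if 0
static void
wm_kmrn_example_setbits(struct wm_softc *sc, int reg, int bits)
{
	int val;

	val = wm_kmrn_readreg(sc, reg);
	wm_kmrn_writereg(sc, reg, val | bits);
}
#endif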
8914 
8915 /* SGMII related */
8916 
8917 /*
8918  * wm_sgmii_uses_mdio
8919  *
8920  * Check whether the transaction is to the internal PHY or the external
8921  * MDIO interface. Return true if it's MDIO.
8922  */
8923 static bool
8924 wm_sgmii_uses_mdio(struct wm_softc *sc)
8925 {
8926 	uint32_t reg;
8927 	bool ismdio = false;
8928 
8929 	switch (sc->sc_type) {
8930 	case WM_T_82575:
8931 	case WM_T_82576:
8932 		reg = CSR_READ(sc, WMREG_MDIC);
8933 		ismdio = ((reg & MDIC_DEST) != 0);
8934 		break;
8935 	case WM_T_82580:
8936 	case WM_T_I350:
8937 	case WM_T_I354:
8938 	case WM_T_I210:
8939 	case WM_T_I211:
8940 		reg = CSR_READ(sc, WMREG_MDICNFG);
8941 		ismdio = ((reg & MDICNFG_DEST) != 0);
8942 		break;
8943 	default:
8944 		break;
8945 	}
8946 
8947 	return ismdio;
8948 }
8949 
8950 /*
8951  * wm_sgmii_readreg:	[mii interface function]
8952  *
8953  *	Read a PHY register on the SGMII
8954  * This could be handled by the PHY layer if we didn't have to lock the
8955  * resource ...
8956  */
8957 static int
8958 wm_sgmii_readreg(device_t self, int phy, int reg)
8959 {
8960 	struct wm_softc *sc = device_private(self);
8961 	uint32_t i2ccmd;
8962 	int i, rv;
8963 
8964 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
8965 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8966 		    __func__);
8967 		return 0;
8968 	}
8969 
8970 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
8971 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
8972 	    | I2CCMD_OPCODE_READ;
8973 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
8974 
8975 	/* Poll the ready bit */
8976 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
8977 		delay(50);
8978 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
8979 		if (i2ccmd & I2CCMD_READY)
8980 			break;
8981 	}
8982 	if ((i2ccmd & I2CCMD_READY) == 0)
8983 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
8984 	if ((i2ccmd & I2CCMD_ERROR) != 0)
8985 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
8986 
8987 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
8988 
8989 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
8990 	return rv;
8991 }
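
/*
 * A minimal sketch (illustrative only, not used by the driver): the
 * I2CCMD data field is transferred most significant byte first, so
 * both the read above and the write below swap the two bytes of the
 * 16-bit PHY register value.  The helper name is hypothetical.
 */
#if 0
static inline uint16_t
wm_i2ccmd_swap16(uint16_t v)
{

	return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00);
}
#endif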
8992 
8993 /*
8994  * wm_sgmii_writereg:	[mii interface function]
8995  *
8996  *	Write a PHY register on the SGMII.
8997  * This could be handled by the PHY layer if we didn't have to lock the
8998  * resource ...
8999  */
9000 static void
9001 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9002 {
9003 	struct wm_softc *sc = device_private(self);
9004 	uint32_t i2ccmd;
9005 	int i;
9006 	int val_swapped;
9007 
9008 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9009 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9010 		    __func__);
9011 		return;
9012 	}
9013 	/* Swap the data bytes for the I2C interface */
9014 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9015 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9016 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9017 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9018 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9019 
9020 	/* Poll the ready bit */
9021 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9022 		delay(50);
9023 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9024 		if (i2ccmd & I2CCMD_READY)
9025 			break;
9026 	}
9027 	if ((i2ccmd & I2CCMD_READY) == 0)
9028 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9029 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9030 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9031 
9032 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9033 }
9034 
9035 /* TBI related */
9036 
9037 /*
9038  * wm_tbi_mediainit:
9039  *
9040  *	Initialize media for use on 1000BASE-X devices.
9041  */
9042 static void
9043 wm_tbi_mediainit(struct wm_softc *sc)
9044 {
9045 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9046 	const char *sep = "";
9047 
9048 	if (sc->sc_type < WM_T_82543)
9049 		sc->sc_tipg = TIPG_WM_DFLT;
9050 	else
9051 		sc->sc_tipg = TIPG_LG_DFLT;
9052 
9053 	sc->sc_tbi_serdes_anegticks = 5;
9054 
9055 	/* Initialize our media structures */
9056 	sc->sc_mii.mii_ifp = ifp;
9057 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9058 
9059 	if ((sc->sc_type >= WM_T_82575)
9060 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9061 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9062 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9063 	else
9064 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9065 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9066 
9067 	/*
9068 	 * SWD Pins:
9069 	 *
9070 	 *	0 = Link LED (output)
9071 	 *	1 = Loss Of Signal (input)
9072 	 */
9073 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9074 
9075 	/* XXX Perhaps this is only for TBI */
9076 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9077 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9078 
9079 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9080 		sc->sc_ctrl &= ~CTRL_LRST;
9081 
9082 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9083 
9084 #define	ADD(ss, mm, dd)							\
9085 do {									\
9086 	aprint_normal("%s%s", sep, ss);					\
9087 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9088 	sep = ", ";							\
9089 } while (/*CONSTCOND*/0)
9090 
9091 	aprint_normal_dev(sc->sc_dev, "");
9092 
9093 	/* Only 82545 is LX */
9094 	if (sc->sc_type == WM_T_82545) {
9095 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9096 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9097 	} else {
9098 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9099 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9100 	}
9101 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9102 	aprint_normal("\n");
9103 
9104 #undef ADD
9105 
9106 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9107 }
9108 
9109 /*
9110  * wm_tbi_mediachange:	[ifmedia interface function]
9111  *
9112  *	Set hardware to newly-selected media on a 1000BASE-X device.
9113  */
9114 static int
9115 wm_tbi_mediachange(struct ifnet *ifp)
9116 {
9117 	struct wm_softc *sc = ifp->if_softc;
9118 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9119 	uint32_t status;
9120 	int i;
9121 
9122 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9123 		/* XXX need some work for >= 82571 and < 82575 */
9124 		if (sc->sc_type < WM_T_82575)
9125 			return 0;
9126 	}
9127 
9128 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9129 	    || (sc->sc_type >= WM_T_82575))
9130 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9131 
9132 	sc->sc_ctrl &= ~CTRL_LRST;
9133 	sc->sc_txcw = TXCW_ANE;
9134 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9135 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9136 	else if (ife->ifm_media & IFM_FDX)
9137 		sc->sc_txcw |= TXCW_FD;
9138 	else
9139 		sc->sc_txcw |= TXCW_HD;
9140 
9141 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9142 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9143 
9144 	DPRINTF(WM_DEBUG_LINK, ("%s: sc_txcw = 0x%x after autoneg check\n",
9145 		    device_xname(sc->sc_dev), sc->sc_txcw));
9146 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9147 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9148 	CSR_WRITE_FLUSH(sc);
9149 	delay(1000);
9150 
9151 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9152 	DPRINTF(WM_DEBUG_LINK, ("%s: i = 0x%x\n", device_xname(sc->sc_dev), i));
9153 
9154 	/*
9155 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
9156 	 * optics detect a signal, 0 if they don't.
9157 	 */
9158 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9159 		/* Have signal; wait for the link to come up. */
9160 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9161 			delay(10000);
9162 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9163 				break;
9164 		}
9165 
9166 		DPRINTF(WM_DEBUG_LINK, ("%s: i = %d after waiting for link\n",
9167 			    device_xname(sc->sc_dev), i));
9168 
9169 		status = CSR_READ(sc, WMREG_STATUS);
9170 		DPRINTF(WM_DEBUG_LINK,
9171 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9172 			device_xname(sc->sc_dev), status, STATUS_LU));
9173 		if (status & STATUS_LU) {
9174 			/* Link is up. */
9175 			DPRINTF(WM_DEBUG_LINK,
9176 			    ("%s: LINK: set media -> link up %s\n",
9177 			    device_xname(sc->sc_dev),
9178 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9179 
9180 			/*
9181 			 * NOTE: the hardware updates TFCE and RFCE in CTRL
9182 			 * automatically, so we re-read it into sc->sc_ctrl.
9183 			 */
9184 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9185 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9186 			sc->sc_fcrtl &= ~FCRTL_XONE;
9187 			if (status & STATUS_FD)
9188 				sc->sc_tctl |=
9189 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9190 			else
9191 				sc->sc_tctl |=
9192 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9193 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9194 				sc->sc_fcrtl |= FCRTL_XONE;
9195 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9196 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9197 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9198 				      sc->sc_fcrtl);
9199 			sc->sc_tbi_linkup = 1;
9200 		} else {
9201 			if (i == WM_LINKUP_TIMEOUT)
9202 				wm_check_for_link(sc);
9203 			/* Link is down. */
9204 			DPRINTF(WM_DEBUG_LINK,
9205 			    ("%s: LINK: set media -> link down\n",
9206 			    device_xname(sc->sc_dev)));
9207 			sc->sc_tbi_linkup = 0;
9208 		}
9209 	} else {
9210 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9211 		    device_xname(sc->sc_dev)));
9212 		sc->sc_tbi_linkup = 0;
9213 	}
9214 
9215 	wm_tbi_serdes_set_linkled(sc);
9216 
9217 	return 0;
9218 }
9219 
9220 /*
9221  * wm_tbi_mediastatus:	[ifmedia interface function]
9222  *
9223  *	Get the current interface media status on a 1000BASE-X device.
9224  */
9225 static void
9226 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9227 {
9228 	struct wm_softc *sc = ifp->if_softc;
9229 	uint32_t ctrl, status;
9230 
9231 	ifmr->ifm_status = IFM_AVALID;
9232 	ifmr->ifm_active = IFM_ETHER;
9233 
9234 	status = CSR_READ(sc, WMREG_STATUS);
9235 	if ((status & STATUS_LU) == 0) {
9236 		ifmr->ifm_active |= IFM_NONE;
9237 		return;
9238 	}
9239 
9240 	ifmr->ifm_status |= IFM_ACTIVE;
9241 	/* Only 82545 is LX */
9242 	if (sc->sc_type == WM_T_82545)
9243 		ifmr->ifm_active |= IFM_1000_LX;
9244 	else
9245 		ifmr->ifm_active |= IFM_1000_SX;
9246 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9247 		ifmr->ifm_active |= IFM_FDX;
9248 	else
9249 		ifmr->ifm_active |= IFM_HDX;
9250 	ctrl = CSR_READ(sc, WMREG_CTRL);
9251 	if (ctrl & CTRL_RFCE)
9252 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9253 	if (ctrl & CTRL_TFCE)
9254 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9255 }
9256 
9257 /* XXX TBI only */
9258 static int
9259 wm_check_for_link(struct wm_softc *sc)
9260 {
9261 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9262 	uint32_t rxcw;
9263 	uint32_t ctrl;
9264 	uint32_t status;
9265 	uint32_t sig;
9266 
9267 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9268 		/* XXX need some work for >= 82571 */
9269 		if (sc->sc_type >= WM_T_82571) {
9270 			sc->sc_tbi_linkup = 1;
9271 			return 0;
9272 		}
9273 	}
9274 
9275 	rxcw = CSR_READ(sc, WMREG_RXCW);
9276 	ctrl = CSR_READ(sc, WMREG_CTRL);
9277 	status = CSR_READ(sc, WMREG_STATUS);
9278 
9279 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9280 
9281 	DPRINTF(WM_DEBUG_LINK,
9282 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9283 		device_xname(sc->sc_dev), __func__,
9284 		((ctrl & CTRL_SWDPIN(1)) == sig),
9285 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9286 
9287 	/*
9288 	 * SWDPIN   LU RXCW
9289 	 *      0    0    0
9290 	 *      0    0    1	(should not happen)
9291 	 *      0    1    0	(should not happen)
9292 	 *      0    1    1	(should not happen)
9293 	 *      1    0    0	Disable autonegotiation and force link up
9294 	 *      1    0    1	got /C/ but not linkup yet
9295 	 *      1    1    0	(linkup)
9296 	 *      1    1    1	If IFM_AUTO, go back to autonegotiation
9297 	 */
9299 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9300 	    && ((status & STATUS_LU) == 0)
9301 	    && ((rxcw & RXCW_C) == 0)) {
9302 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9303 			__func__));
9304 		sc->sc_tbi_linkup = 0;
9305 		/* Disable auto-negotiation in the TXCW register */
9306 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9307 
9308 		/*
9309 		 * Force link-up and also force full-duplex.
9310 		 *
9311 		 * NOTE: the hardware updated TFCE and RFCE in CTRL
9312 		 * automatically, so we update sc->sc_ctrl from it.
9313 		 */
9314 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9315 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9316 	} else if (((status & STATUS_LU) != 0)
9317 	    && ((rxcw & RXCW_C) != 0)
9318 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9319 		sc->sc_tbi_linkup = 1;
9320 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9321 			__func__));
9322 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9323 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9324 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9325 	    && ((rxcw & RXCW_C) != 0)) {
9326 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9327 	} else {
9328 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9329 			status));
9330 	}
9331 
9332 	return 0;
9333 }
9334 
9335 /*
9336  * wm_tbi_tick:
9337  *
9338  *	Check the link on TBI devices.
9339  *	This function acts as mii_tick().
9340  */
9341 static void
9342 wm_tbi_tick(struct wm_softc *sc)
9343 {
9344 	struct mii_data *mii = &sc->sc_mii;
9345 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9346 	uint32_t status;
9347 
9348 	KASSERT(WM_CORE_LOCKED(sc));
9349 
9350 	status = CSR_READ(sc, WMREG_STATUS);
9351 
9352 	/* XXX is this needed? */
9353 	(void)CSR_READ(sc, WMREG_RXCW);
9354 	(void)CSR_READ(sc, WMREG_CTRL);
9355 
9356 	/* set link status */
9357 	if ((status & STATUS_LU) == 0) {
9358 		DPRINTF(WM_DEBUG_LINK,
9359 		    ("%s: LINK: checklink -> down\n",
9360 			device_xname(sc->sc_dev)));
9361 		sc->sc_tbi_linkup = 0;
9362 	} else if (sc->sc_tbi_linkup == 0) {
9363 		DPRINTF(WM_DEBUG_LINK,
9364 		    ("%s: LINK: checklink -> up %s\n",
9365 			device_xname(sc->sc_dev),
9366 			(status & STATUS_FD) ? "FDX" : "HDX"));
9367 		sc->sc_tbi_linkup = 1;
9368 		sc->sc_tbi_serdes_ticks = 0;
9369 	}
9370 
9371 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9372 		goto setled;
9373 
9374 	if ((status & STATUS_LU) == 0) {
9375 		sc->sc_tbi_linkup = 0;
9376 		/* If the timer expired, retry autonegotiation */
9377 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9378 		    && (++sc->sc_tbi_serdes_ticks
9379 			>= sc->sc_tbi_serdes_anegticks)) {
9380 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9381 			sc->sc_tbi_serdes_ticks = 0;
9382 			/*
9383 			 * Reset the link, and let autonegotiation do
9384 			 * its thing
9385 			 */
9386 			sc->sc_ctrl |= CTRL_LRST;
9387 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9388 			CSR_WRITE_FLUSH(sc);
9389 			delay(1000);
9390 			sc->sc_ctrl &= ~CTRL_LRST;
9391 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9392 			CSR_WRITE_FLUSH(sc);
9393 			delay(1000);
9394 			CSR_WRITE(sc, WMREG_TXCW,
9395 			    sc->sc_txcw & ~TXCW_ANE);
9396 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9397 		}
9398 	}
9399 
9400 setled:
9401 	wm_tbi_serdes_set_linkled(sc);
9402 }
9403 
9404 /* SERDES related */
9405 static void
9406 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9407 {
9408 	uint32_t reg;
9409 
9410 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9411 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
9412 		return;
9413 
9414 	reg = CSR_READ(sc, WMREG_PCS_CFG);
9415 	reg |= PCS_CFG_PCS_EN;
9416 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9417 
9418 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
9419 	reg &= ~CTRL_EXT_SWDPIN(3);
9420 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9421 	CSR_WRITE_FLUSH(sc);
9422 }
9423 
9424 static int
9425 wm_serdes_mediachange(struct ifnet *ifp)
9426 {
9427 	struct wm_softc *sc = ifp->if_softc;
9428 	bool pcs_autoneg = true; /* XXX */
9429 	uint32_t ctrl_ext, pcs_lctl, reg;
9430 
9431 	/* XXX Currently, this function is not called on 8257[12] */
9432 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9433 	    || (sc->sc_type >= WM_T_82575))
9434 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9435 
9436 	wm_serdes_power_up_link_82575(sc);
9437 
9438 	sc->sc_ctrl |= CTRL_SLU;
9439 
9440 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9441 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9442 
9443 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9444 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9445 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9446 	case CTRL_EXT_LINK_MODE_SGMII:
9447 		pcs_autoneg = true;
9448 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9449 		break;
9450 	case CTRL_EXT_LINK_MODE_1000KX:
9451 		pcs_autoneg = false;
9452 		/* FALLTHROUGH */
9453 	default:
9454 		if ((sc->sc_type == WM_T_82575)
9455 		    || (sc->sc_type == WM_T_82576)) {
9456 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9457 				pcs_autoneg = false;
9458 		}
9459 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9460 		    | CTRL_FRCFDX;
9461 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9462 	}
9463 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9464 
9465 	if (pcs_autoneg) {
9466 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9467 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9468 
9469 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
9470 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9471 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9472 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9473 	} else
9474 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9475 
9476 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9477 
9479 	return 0;
9480 }
9481 
9482 static void
9483 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9484 {
9485 	struct wm_softc *sc = ifp->if_softc;
9486 	struct mii_data *mii = &sc->sc_mii;
9487 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9488 	uint32_t pcs_adv, pcs_lpab, reg;
9489 
9490 	ifmr->ifm_status = IFM_AVALID;
9491 	ifmr->ifm_active = IFM_ETHER;
9492 
9493 	/* Check PCS */
9494 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9495 	if ((reg & PCS_LSTS_LINKOK) == 0) {
9496 		ifmr->ifm_active |= IFM_NONE;
9497 		sc->sc_tbi_linkup = 0;
9498 		goto setled;
9499 	}
9500 
9501 	sc->sc_tbi_linkup = 1;
9502 	ifmr->ifm_status |= IFM_ACTIVE;
9503 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9504 	if ((reg & PCS_LSTS_FDX) != 0)
9505 		ifmr->ifm_active |= IFM_FDX;
9506 	else
9507 		ifmr->ifm_active |= IFM_HDX;
9508 	mii->mii_media_active &= ~IFM_ETH_FMASK;
9509 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9510 		/* Check flow */
9511 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9512 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
9513 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9514 			goto setled;
9515 		}
9516 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9517 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9518 		DPRINTF(WM_DEBUG_LINK,
9519 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9520 		if ((pcs_adv & TXCW_SYM_PAUSE)
9521 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9522 			mii->mii_media_active |= IFM_FLOW
9523 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9524 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9525 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9526 		    && (pcs_lpab & TXCW_SYM_PAUSE)
9527 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9528 			mii->mii_media_active |= IFM_FLOW
9529 			    | IFM_ETH_TXPAUSE;
9530 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
9531 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9532 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9533 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9534 			mii->mii_media_active |= IFM_FLOW
9535 			    | IFM_ETH_RXPAUSE;
9536 		}
9538 	}
9539 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9540 	    | (mii->mii_media_active & IFM_ETH_FMASK);
9541 setled:
9542 	wm_tbi_serdes_set_linkled(sc);
9543 }
9544 
9545 /*
9546  * wm_serdes_tick:
9547  *
9548  *	Check the link on serdes devices.
9549  */
9550 static void
9551 wm_serdes_tick(struct wm_softc *sc)
9552 {
9553 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9554 	struct mii_data *mii = &sc->sc_mii;
9555 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9556 	uint32_t reg;
9557 
9558 	KASSERT(WM_CORE_LOCKED(sc));
9559 
9560 	mii->mii_media_status = IFM_AVALID;
9561 	mii->mii_media_active = IFM_ETHER;
9562 
9563 	/* Check PCS */
9564 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9565 	if ((reg & PCS_LSTS_LINKOK) != 0) {
9566 		mii->mii_media_status |= IFM_ACTIVE;
9567 		sc->sc_tbi_linkup = 1;
9568 		sc->sc_tbi_serdes_ticks = 0;
9569 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
9570 		if ((reg & PCS_LSTS_FDX) != 0)
9571 			mii->mii_media_active |= IFM_FDX;
9572 		else
9573 			mii->mii_media_active |= IFM_HDX;
9574 	} else {
9575 		mii->mii_media_active |= IFM_NONE;
9576 		sc->sc_tbi_linkup = 0;
9577 		/* If the timer expired, retry autonegotiation */
9578 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9579 		    && (++sc->sc_tbi_serdes_ticks
9580 			>= sc->sc_tbi_serdes_anegticks)) {
9581 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9582 			sc->sc_tbi_serdes_ticks = 0;
9583 			/* XXX */
9584 			wm_serdes_mediachange(ifp);
9585 		}
9586 	}
9587 
9588 	wm_tbi_serdes_set_linkled(sc);
9589 }
9590 
9591 /* SFP related */
9592 
9593 static int
9594 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9595 {
9596 	uint32_t i2ccmd;
9597 	int i;
9598 
9599 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9600 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9601 
9602 	/* Poll the ready bit */
9603 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9604 		delay(50);
9605 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9606 		if (i2ccmd & I2CCMD_READY)
9607 			break;
9608 	}
9609 	if ((i2ccmd & I2CCMD_READY) == 0)
9610 		return -1;
9611 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9612 		return -1;
9613 
9614 	*data = i2ccmd & 0x00ff;
9615 
9616 	return 0;
9617 }
9618 
9619 static uint32_t
9620 wm_sfp_get_media_type(struct wm_softc *sc)
9621 {
9622 	uint32_t ctrl_ext;
9623 	uint8_t val = 0;
9624 	int timeout = 3;
9625 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9626 	int rv = -1;
9627 
9628 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9629 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9630 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9631 	CSR_WRITE_FLUSH(sc);
9632 
9633 	/* Read SFP module data */
9634 	while (timeout) {
9635 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9636 		if (rv == 0)
9637 			break;
9638 		delay(100*1000); /* XXX too big */
9639 		timeout--;
9640 	}
9641 	if (rv != 0)
9642 		goto out;
9643 	switch (val) {
9644 	case SFF_SFP_ID_SFF:
9645 		aprint_normal_dev(sc->sc_dev,
9646 		    "Module/Connector soldered to board\n");
9647 		break;
9648 	case SFF_SFP_ID_SFP:
9649 		aprint_normal_dev(sc->sc_dev, "SFP\n");
9650 		break;
9651 	case SFF_SFP_ID_UNKNOWN:
9652 		goto out;
9653 	default:
9654 		break;
9655 	}
9656 
9657 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9658 	if (rv != 0) {
9659 		goto out;
9660 	}
9661 
9662 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9663 		mediatype = WM_MEDIATYPE_SERDES;
9664 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9665 		sc->sc_flags |= WM_F_SGMII;
9666 		mediatype = WM_MEDIATYPE_COPPER;
9667 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9668 		sc->sc_flags |= WM_F_SGMII;
9669 		mediatype = WM_MEDIATYPE_SERDES;
9670 	}
9671 
9672 out:
9673 	/* Restore I2C interface setting */
9674 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9675 
9676 	return mediatype;
9677 }
9678 /*
9679  * NVM related.
9680  * Microwire, SPI (w/wo EERD) and Flash.
9681  */
9682 
9683 /* Both spi and uwire */
9684 
9685 /*
9686  * wm_eeprom_sendbits:
9687  *
9688  *	Send a series of bits to the EEPROM.
9689  */
9690 static void
9691 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9692 {
9693 	uint32_t reg;
9694 	int x;
9695 
9696 	reg = CSR_READ(sc, WMREG_EECD);
9697 
9698 	for (x = nbits; x > 0; x--) {
9699 		if (bits & (1U << (x - 1)))
9700 			reg |= EECD_DI;
9701 		else
9702 			reg &= ~EECD_DI;
9703 		CSR_WRITE(sc, WMREG_EECD, reg);
9704 		CSR_WRITE_FLUSH(sc);
9705 		delay(2);
9706 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9707 		CSR_WRITE_FLUSH(sc);
9708 		delay(2);
9709 		CSR_WRITE(sc, WMREG_EECD, reg);
9710 		CSR_WRITE_FLUSH(sc);
9711 		delay(2);
9712 	}
9713 }
9714 
9715 /*
9716  * wm_eeprom_recvbits:
9717  *
9718  *	Receive a series of bits from the EEPROM.
9719  */
9720 static void
9721 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9722 {
9723 	uint32_t reg, val;
9724 	int x;
9725 
9726 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9727 
9728 	val = 0;
9729 	for (x = nbits; x > 0; x--) {
9730 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9731 		CSR_WRITE_FLUSH(sc);
9732 		delay(2);
9733 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9734 			val |= (1U << (x - 1));
9735 		CSR_WRITE(sc, WMREG_EECD, reg);
9736 		CSR_WRITE_FLUSH(sc);
9737 		delay(2);
9738 	}
9739 	*valp = val;
9740 }
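
/*
 * Note (illustrative): each bit above is clocked with three 2 us
 * phases (data setup, SK high, SK low), so a 16-bit transfer costs
 * roughly 100 us on top of the opcode and address bits.
 */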
9741 
9742 /* Microwire */
9743 
9744 /*
9745  * wm_nvm_read_uwire:
9746  *
9747  *	Read a word from the EEPROM using the MicroWire protocol.
9748  */
9749 static int
9750 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9751 {
9752 	uint32_t reg, val;
9753 	int i;
9754 
9755 	for (i = 0; i < wordcnt; i++) {
9756 		/* Clear SK and DI. */
9757 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9758 		CSR_WRITE(sc, WMREG_EECD, reg);
9759 
9760 		/*
9761 		 * XXX: workaround for a bug in qemu-0.12.x and prior
9762 		 * and Xen.
9763 		 *
9764 		 * We use this workaround only for 82540 because qemu's
9765 		 * e1000 act as 82540.
9766 		 */
9767 		if (sc->sc_type == WM_T_82540) {
9768 			reg |= EECD_SK;
9769 			CSR_WRITE(sc, WMREG_EECD, reg);
9770 			reg &= ~EECD_SK;
9771 			CSR_WRITE(sc, WMREG_EECD, reg);
9772 			CSR_WRITE_FLUSH(sc);
9773 			delay(2);
9774 		}
9775 		/* XXX: end of workaround */
9776 
9777 		/* Set CHIP SELECT. */
9778 		reg |= EECD_CS;
9779 		CSR_WRITE(sc, WMREG_EECD, reg);
9780 		CSR_WRITE_FLUSH(sc);
9781 		delay(2);
9782 
9783 		/* Shift in the READ command. */
9784 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9785 
9786 		/* Shift in address. */
9787 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9788 
9789 		/* Shift out the data. */
9790 		wm_eeprom_recvbits(sc, &val, 16);
9791 		data[i] = val & 0xffff;
9792 
9793 		/* Clear CHIP SELECT. */
9794 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9795 		CSR_WRITE(sc, WMREG_EECD, reg);
9796 		CSR_WRITE_FLUSH(sc);
9797 		delay(2);
9798 	}
9799 
9800 	return 0;
9801 }
9802 
9803 /* SPI */
9804 
9805 /*
9806  * Set SPI and FLASH related information from the EECD register.
9807  * For 82541 and 82547, the word size is taken from EEPROM.
9808  */
9809 static int
9810 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9811 {
9812 	int size;
9813 	uint32_t reg;
9814 	uint16_t data;
9815 
9816 	reg = CSR_READ(sc, WMREG_EECD);
9817 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9818 
9819 	/* Read the size of NVM from EECD by default */
9820 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9821 	switch (sc->sc_type) {
9822 	case WM_T_82541:
9823 	case WM_T_82541_2:
9824 	case WM_T_82547:
9825 	case WM_T_82547_2:
9826 		/* Set dummy value to access EEPROM */
9827 		sc->sc_nvm_wordsize = 64;
9828 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9829 		reg = data;
9830 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9831 		if (size == 0)
9832 			size = 6; /* 64 word size */
9833 		else
9834 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9835 		break;
9836 	case WM_T_80003:
9837 	case WM_T_82571:
9838 	case WM_T_82572:
9839 	case WM_T_82573: /* SPI case */
9840 	case WM_T_82574: /* SPI case */
9841 	case WM_T_82583: /* SPI case */
9842 		size += NVM_WORD_SIZE_BASE_SHIFT;
9843 		if (size > 14)
9844 			size = 14;
9845 		break;
9846 	case WM_T_82575:
9847 	case WM_T_82576:
9848 	case WM_T_82580:
9849 	case WM_T_I350:
9850 	case WM_T_I354:
9851 	case WM_T_I210:
9852 	case WM_T_I211:
9853 		size += NVM_WORD_SIZE_BASE_SHIFT;
9854 		if (size > 15)
9855 			size = 15;
9856 		break;
9857 	default:
9858 		aprint_error_dev(sc->sc_dev,
9859 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9860 		return -1;
9862 	}
9863 
9864 	sc->sc_nvm_wordsize = 1 << size;
9865 
9866 	return 0;
9867 }
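
/*
 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in the
 * Intel shared code): an EECD size field of 2 on an 82571 gives
 * size = 2 + 6 = 8, so sc_nvm_wordsize = 1 << 8 = 256 words.
 */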
9868 
9869 /*
9870  * wm_nvm_ready_spi:
9871  *
9872  *	Wait for a SPI EEPROM to be ready for commands.
9873  */
9874 static int
9875 wm_nvm_ready_spi(struct wm_softc *sc)
9876 {
9877 	uint32_t val;
9878 	int usec;
9879 
9880 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9881 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9882 		wm_eeprom_recvbits(sc, &val, 8);
9883 		if ((val & SPI_SR_RDY) == 0)
9884 			break;
9885 	}
9886 	if (usec >= SPI_MAX_RETRIES) {
9887 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
9888 		return 1;
9889 	}
9890 	return 0;
9891 }
9892 
9893 /*
9894  * wm_nvm_read_spi:
9895  *
9896  *	Read a word from the EEPROM using the SPI protocol.
9897  */
9898 static int
9899 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9900 {
9901 	uint32_t reg, val;
9902 	int i;
9903 	uint8_t opc;
9904 
9905 	/* Clear SK and CS. */
9906 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
9907 	CSR_WRITE(sc, WMREG_EECD, reg);
9908 	CSR_WRITE_FLUSH(sc);
9909 	delay(2);
9910 
9911 	if (wm_nvm_ready_spi(sc))
9912 		return 1;
9913 
9914 	/* Toggle CS to flush commands. */
9915 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
9916 	CSR_WRITE_FLUSH(sc);
9917 	delay(2);
9918 	CSR_WRITE(sc, WMREG_EECD, reg);
9919 	CSR_WRITE_FLUSH(sc);
9920 	delay(2);
9921 
9922 	opc = SPI_OPC_READ;
9923 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
9924 		opc |= SPI_OPC_A8;
9925 
9926 	wm_eeprom_sendbits(sc, opc, 8);
9927 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
9928 
9929 	for (i = 0; i < wordcnt; i++) {
9930 		wm_eeprom_recvbits(sc, &val, 16);
9931 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
9932 	}
9933 
9934 	/* Raise CS and clear SK. */
9935 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
9936 	CSR_WRITE(sc, WMREG_EECD, reg);
9937 	CSR_WRITE_FLUSH(sc);
9938 	delay(2);
9939 
9940 	return 0;
9941 }
9942 
9943 /* Using with EERD */
9944 
9945 static int
9946 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
9947 {
9948 	uint32_t attempts = 100000;
9949 	uint32_t i, reg = 0;
9950 	int32_t done = -1;
9951 
9952 	for (i = 0; i < attempts; i++) {
9953 		reg = CSR_READ(sc, rw);
9954 
9955 		if (reg & EERD_DONE) {
9956 			done = 0;
9957 			break;
9958 		}
9959 		delay(5);
9960 	}
9961 
9962 	return done;
9963 }
9964 
9965 static int
9966 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
9967     uint16_t *data)
9968 {
9969 	int i, eerd = 0;
9970 	int error = 0;
9971 
9972 	for (i = 0; i < wordcnt; i++) {
9973 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
9974 
9975 		CSR_WRITE(sc, WMREG_EERD, eerd);
9976 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
9977 		if (error != 0)
9978 			break;
9979 
9980 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
9981 	}
9982 
9983 	return error;
9984 }
9985 
9986 /* Flash */
9987 
9988 static int
9989 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
9990 {
9991 	uint32_t eecd;
9992 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
9993 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
9994 	uint8_t sig_byte = 0;
9995 
9996 	switch (sc->sc_type) {
9997 	case WM_T_ICH8:
9998 	case WM_T_ICH9:
9999 		eecd = CSR_READ(sc, WMREG_EECD);
10000 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10001 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10002 			return 0;
10003 		}
10004 		/* FALLTHROUGH */
10005 	default:
10006 		/* Default to 0 */
10007 		*bank = 0;
10008 
10009 		/* Check bank 0 */
10010 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10011 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10012 			*bank = 0;
10013 			return 0;
10014 		}
10015 
10016 		/* Check bank 1 */
10017 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10018 		    &sig_byte);
10019 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10020 			*bank = 1;
10021 			return 0;
10022 		}
10023 	}
10024 
10025 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10026 		device_xname(sc->sc_dev)));
10027 	return -1;
10028 }
10029 
10030 /******************************************************************************
10031  * This function does initial flash setup so that a new read/write/erase cycle
10032  * can be started.
10033  *
10034  * sc - The pointer to the hw structure
10035  ****************************************************************************/
10036 static int32_t
10037 wm_ich8_cycle_init(struct wm_softc *sc)
10038 {
10039 	uint16_t hsfsts;
10040 	int32_t error = 1;
10041 	int32_t i     = 0;
10042 
10043 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10044 
10045 	/* Maybe check the Flash Descriptor Valid bit in HW status */
10046 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10047 		return error;
10048 	}
10049 
10050 	/* Clear FCERR in HW status by writing a 1 */
10051 	/* Clear DAEL in HW status by writing a 1 */
10052 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10053 
10054 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10055 
10056 	/*
10057 	 * Either we should have a hardware SPI cycle-in-progress bit to
10058 	 * check against in order to start a new cycle, or the FDONE bit
10059 	 * should be changed in the hardware so that it is 1 after hardware
10060 	 * reset, which can then be used to tell whether a cycle is in
10061 	 * progress or has been completed.  We should also have some software
10062 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit so
10063 	 * that access to those bits by two threads is serialized, or some
10064 	 * way to ensure two threads don't start the cycle at the same time.
10065 	 */
10066 
10067 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10068 		/*
10069 		 * There is no cycle running at present, so we can start a
10070 		 * cycle
10071 		 */
10072 
10073 		/* Begin by setting Flash Cycle Done. */
10074 		hsfsts |= HSFSTS_DONE;
10075 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10076 		error = 0;
10077 	} else {
10078 		/*
10079 		 * otherwise poll for sometime so the current cycle has a
10080 		 * chance to end before giving up.
10081 		 */
10082 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10083 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10084 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10085 				error = 0;
10086 				break;
10087 			}
10088 			delay(1);
10089 		}
10090 		if (error == 0) {
10091 			/*
10092 			 * Successful in waiting for previous cycle to timeout,
10093 			 * now set the Flash Cycle Done.
10094 			 */
10095 			hsfsts |= HSFSTS_DONE;
10096 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10097 		}
10098 	}
10099 	return error;
10100 }
10101 
10102 /******************************************************************************
10103  * This function starts a flash cycle and waits for its completion
10104  *
10105  * sc - The pointer to the hw structure
10106  ****************************************************************************/
10107 static int32_t
10108 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10109 {
10110 	uint16_t hsflctl;
10111 	uint16_t hsfsts;
10112 	int32_t error = 1;
10113 	uint32_t i = 0;
10114 
10115 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10116 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10117 	hsflctl |= HSFCTL_GO;
10118 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10119 
10120 	/* Wait till FDONE bit is set to 1 */
10121 	do {
10122 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10123 		if (hsfsts & HSFSTS_DONE)
10124 			break;
10125 		delay(1);
10126 		i++;
10127 	} while (i < timeout);
10128 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10129 		error = 0;
10130 
10131 	return error;
10132 }
10133 
10134 /******************************************************************************
10135  * Reads a byte or word from the NVM using the ICH8 flash access registers.
10136  *
10137  * sc - The pointer to the hw structure
10138  * index - The index of the byte or word to read.
10139  * size - Size of data to read, 1=byte 2=word
10140  * data - Pointer to the word to store the value read.
10141  *****************************************************************************/
10142 static int32_t
10143 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10144     uint32_t size, uint16_t *data)
10145 {
10146 	uint16_t hsfsts;
10147 	uint16_t hsflctl;
10148 	uint32_t flash_linear_address;
10149 	uint32_t flash_data = 0;
10150 	int32_t error = 1;
10151 	int32_t count = 0;
10152 
10153 	if (size < 1 || size > 2 || data == NULL ||
10154 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10155 		return error;
10156 
10157 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10158 	    sc->sc_ich8_flash_base;
10159 
10160 	do {
10161 		delay(1);
10162 		/* Steps */
10163 		error = wm_ich8_cycle_init(sc);
10164 		if (error)
10165 			break;
10166 
10167 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10168 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10169 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10170 		    & HSFCTL_BCOUNT_MASK;
10171 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10172 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10173 
10174 		/*
10175 		 * Write the last 24 bits of index into Flash Linear address
10176 		 * field in Flash Address
10177 		 */
10178 		/* TODO: TBD maybe check the index against the size of flash */
10179 
10180 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10181 
10182 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10183 
10184 		/*
10185 		 * If FCERR is set, clear it and retry the whole sequence a
10186 		 * few more times; otherwise read the result out of Flash
10187 		 * Data0.  The data comes back least significant byte
10188 		 * first.
10189 		 */
10190 		if (error == 0) {
10191 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10192 			if (size == 1)
10193 				*data = (uint8_t)(flash_data & 0x000000FF);
10194 			else if (size == 2)
10195 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10196 			break;
10197 		} else {
10198 			/*
10199 			 * If we've gotten here, then things are probably
10200 			 * completely hosed, but if the error condition is
10201 			 * detected, it won't hurt to give it another try...
10202 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10203 			 */
10204 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10205 			if (hsfsts & HSFSTS_ERR) {
10206 				/* Repeat for some time before giving up. */
10207 				continue;
10208 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10209 				break;
10210 		}
10211 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10212 
10213 	return error;
10214 }
10215 
10216 /******************************************************************************
10217  * Reads a single byte from the NVM using the ICH8 flash access registers.
10218  *
10219  * sc - pointer to wm_hw structure
10220  * index - The index of the byte to read.
10221  * data - Pointer to a byte to store the value read.
10222  *****************************************************************************/
10223 static int32_t
10224 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10225 {
10226 	int32_t status;
10227 	uint16_t word = 0;
10228 
10229 	status = wm_read_ich8_data(sc, index, 1, &word);
10230 	if (status == 0)
10231 		*data = (uint8_t)word;
10232 	else
10233 		*data = 0;
10234 
10235 	return status;
10236 }
10237 
10238 /******************************************************************************
10239  * Reads a word from the NVM using the ICH8 flash access registers.
10240  *
10241  * sc - pointer to wm_hw structure
10242  * index - The starting byte index of the word to read.
10243  * data - Pointer to a word to store the value read.
10244  *****************************************************************************/
10245 static int32_t
10246 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10247 {
10248 	int32_t status;
10249 
10250 	status = wm_read_ich8_data(sc, index, 2, data);
10251 	return status;
10252 }
10253 
10254 /******************************************************************************
10255  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10256  * register.
10257  *
10258  * sc - Struct containing variables accessed by shared code
10259  * offset - offset of word in the EEPROM to read
10260  * data - word read from the EEPROM
10261  * words - number of words to read
10262  *****************************************************************************/
10263 static int
10264 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10265 {
10266 	int32_t  error = 0;
10267 	uint32_t flash_bank = 0;
10268 	uint32_t act_offset = 0;
10269 	uint32_t bank_offset = 0;
10270 	uint16_t word = 0;
10271 	uint16_t i = 0;
10272 
10273 	/*
10274 	 * We need to know which is the valid flash bank.  In the event
10275 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10276 	 * managing flash_bank.  So it cannot be trusted and needs
10277 	 * to be updated with each read.
10278 	 */
10279 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10280 	if (error) {
10281 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10282 			device_xname(sc->sc_dev)));
10283 		flash_bank = 0;
10284 	}
10285 
10286 	/*
10287 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10288 	 * size
10289 	 */
10290 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10291 
10292 	error = wm_get_swfwhw_semaphore(sc);
10293 	if (error) {
10294 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10295 		    __func__);
10296 		return error;
10297 	}
10298 
10299 	for (i = 0; i < words; i++) {
10300 		/* The NVM part needs a byte offset, hence * 2 */
10301 		act_offset = bank_offset + ((offset + i) * 2);
10302 		error = wm_read_ich8_word(sc, act_offset, &word);
10303 		if (error) {
10304 			aprint_error_dev(sc->sc_dev,
10305 			    "%s: failed to read NVM\n", __func__);
10306 			break;
10307 		}
10308 		data[i] = word;
10309 	}
10310 
10311 	wm_put_swfwhw_semaphore(sc);
10312 	return error;
10313 }
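
/*
 * Worked example (illustrative): with an sc_ich8_flash_bank_size of
 * 0x1000 words and flash_bank 1, NVM word offset 0x10 maps to byte
 * address 0x1000 * 2 + 0x10 * 2 = 0x2020 in the call to
 * wm_read_ich8_word() above.
 */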
10314 
10315 /* iNVM */
10316 
10317 static int
10318 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10319 {
10320 	int32_t  rv = -1;
10321 	uint32_t invm_dword;
10322 	uint16_t i;
10323 	uint8_t record_type, word_address;
10324 
10325 	for (i = 0; i < INVM_SIZE; i++) {
10326 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10327 		/* Get record type */
10328 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10329 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10330 			break;
10331 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10332 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10333 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10334 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10335 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10336 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10337 			if (word_address == address) {
10338 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10339 				rv = 0;
10340 				break;
10341 			}
10342 		}
10343 	}
10344 
10345 	return rv;
10346 }
10347 
10348 static int
10349 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10350 {
10351 	int rv = 0;
10352 	int i;
10353 
10354 	for (i = 0; i < words; i++) {
10355 		switch (offset + i) {
10356 		case NVM_OFF_MACADDR:
10357 		case NVM_OFF_MACADDR1:
10358 		case NVM_OFF_MACADDR2:
10359 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10360 			if (rv != 0) {
10361 				data[i] = 0xffff;
10362 				rv = -1;
10363 			}
10364 			break;
10365 		case NVM_OFF_CFG2:
10366 			rv = wm_nvm_read_word_invm(sc, offset, data);
10367 			if (rv != 0) {
10368 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
10369 				rv = 0;
10370 			}
10371 			break;
10372 		case NVM_OFF_CFG4:
10373 			rv = wm_nvm_read_word_invm(sc, offset, data);
10374 			if (rv != 0) {
10375 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
10376 				rv = 0;
10377 			}
10378 			break;
10379 		case NVM_OFF_LED_1_CFG:
10380 			rv = wm_nvm_read_word_invm(sc, offset, data);
10381 			if (rv != 0) {
10382 				*data = NVM_LED_1_CFG_DEFAULT_I211;
10383 				rv = 0;
10384 			}
10385 			break;
10386 		case NVM_OFF_LED_0_2_CFG:
10387 			rv = wm_nvm_read_word_invm(sc, offset, data);
10388 			if (rv != 0) {
10389 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
10390 				rv = 0;
10391 			}
10392 			break;
10393 		case NVM_OFF_ID_LED_SETTINGS:
10394 			rv = wm_nvm_read_word_invm(sc, offset, data);
10395 			if (rv != 0) {
10396 				*data = ID_LED_RESERVED_FFFF;
10397 				rv = 0;
10398 			}
10399 			break;
10400 		default:
10401 			DPRINTF(WM_DEBUG_NVM,
10402 			    ("NVM word 0x%02x is not mapped.\n", offset));
10403 			*data = NVM_RESERVED_WORD;
10404 			break;
10405 		}
10406 	}
10407 
10408 	return rv;
10409 }
10410 
10411 /* Lock, detecting NVM type, validate checksum, version and read */
10412 
10413 /*
10414  * wm_nvm_acquire:
10415  *
10416  *	Perform the EEPROM handshake required on some chips.
10417  */
10418 static int
10419 wm_nvm_acquire(struct wm_softc *sc)
10420 {
10421 	uint32_t reg;
10422 	int x;
10423 	int ret = 0;
10424 
10425 	/* Flash needs no handshake; always succeeds */
10426 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10427 		return 0;
10428 
10429 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10430 		ret = wm_get_swfwhw_semaphore(sc);
10431 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10432 		/* This will also do wm_get_swsm_semaphore() if needed */
10433 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10434 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10435 		ret = wm_get_swsm_semaphore(sc);
10436 	}
10437 
10438 	if (ret) {
10439 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10440 			__func__);
10441 		return 1;
10442 	}
10443 
10444 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10445 		reg = CSR_READ(sc, WMREG_EECD);
10446 
10447 		/* Request EEPROM access. */
10448 		reg |= EECD_EE_REQ;
10449 		CSR_WRITE(sc, WMREG_EECD, reg);
10450 
10451 		/* ..and wait for it to be granted. */
10452 		for (x = 0; x < 1000; x++) {
10453 			reg = CSR_READ(sc, WMREG_EECD);
10454 			if (reg & EECD_EE_GNT)
10455 				break;
10456 			delay(5);
10457 		}
10458 		if ((reg & EECD_EE_GNT) == 0) {
10459 			aprint_error_dev(sc->sc_dev,
10460 			    "could not acquire EEPROM GNT\n");
10461 			reg &= ~EECD_EE_REQ;
10462 			CSR_WRITE(sc, WMREG_EECD, reg);
10463 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10464 				wm_put_swfwhw_semaphore(sc);
10465 			if (sc->sc_flags & WM_F_LOCK_SWFW)
10466 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10467 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
10468 				wm_put_swsm_semaphore(sc);
10469 			return 1;
10470 		}
10471 	}
10472 
10473 	return 0;
10474 }
10475 
10476 /*
10477  * wm_nvm_release:
10478  *
10479  *	Release the EEPROM mutex.
10480  */
10481 static void
10482 wm_nvm_release(struct wm_softc *sc)
10483 {
10484 	uint32_t reg;
10485 
10486 	/* Flash needs no handshake; nothing to release */
10487 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10488 		return;
10489 
10490 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10491 		reg = CSR_READ(sc, WMREG_EECD);
10492 		reg &= ~EECD_EE_REQ;
10493 		CSR_WRITE(sc, WMREG_EECD, reg);
10494 	}
10495 
10496 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10497 		wm_put_swfwhw_semaphore(sc);
10498 	if (sc->sc_flags & WM_F_LOCK_SWFW)
10499 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10500 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
10501 		wm_put_swsm_semaphore(sc);
10502 }
10503 
10504 static int
10505 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10506 {
10507 	uint32_t eecd = 0;
10508 
10509 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10510 	    || sc->sc_type == WM_T_82583) {
10511 		eecd = CSR_READ(sc, WMREG_EECD);
10512 
10513 		/* Isolate bits 15 & 16 */
10514 		eecd = ((eecd >> 15) & 0x03);
10515 
10516 		/* If both bits are set, device is Flash type */
10517 		if (eecd == 0x03)
10518 			return 0;
10519 	}
10520 	return 1;
10521 }
10522 
10523 static int
10524 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10525 {
10526 	uint32_t eec;
10527 
10528 	eec = CSR_READ(sc, WMREG_EEC);
10529 	if ((eec & EEC_FLASH_DETECTED) != 0)
10530 		return 1;
10531 
10532 	return 0;
10533 }
10534 
10535 /*
10536  * wm_nvm_validate_checksum
10537  *
10538  * The checksum is defined as the sum of the first 64 (16 bit) words.
10539  */
10540 static int
10541 wm_nvm_validate_checksum(struct wm_softc *sc)
10542 {
10543 	uint16_t checksum;
10544 	uint16_t eeprom_data;
10545 #ifdef WM_DEBUG
10546 	uint16_t csum_wordaddr, valid_checksum;
10547 #endif
10548 	int i;
10549 
10550 	checksum = 0;
10551 
10552 	/* Don't check for I211 */
10553 	if (sc->sc_type == WM_T_I211)
10554 		return 0;
10555 
10556 #ifdef WM_DEBUG
10557 	if (sc->sc_type == WM_T_PCH_LPT) {
10558 		csum_wordaddr = NVM_OFF_COMPAT;
10559 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10560 	} else {
10561 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10562 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10563 	}
10564 
10565 	/* Dump EEPROM image for debug */
10566 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10567 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10568 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10569 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10570 		if ((eeprom_data & valid_checksum) == 0) {
10571 			DPRINTF(WM_DEBUG_NVM,
10572 			    ("%s: NVM need to be updated (%04x != %04x)\n",
10573 				device_xname(sc->sc_dev), eeprom_data,
10574 				    valid_checksum));
10575 		}
10576 	}
10577 
10578 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
10579 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10580 		for (i = 0; i < NVM_SIZE; i++) {
10581 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
10582 				printf("XXXX ");
10583 			else
10584 				printf("%04hx ", eeprom_data);
10585 			if (i % 8 == 7)
10586 				printf("\n");
10587 		}
10588 	}
10589 
10590 #endif /* WM_DEBUG */
10591 
10592 	for (i = 0; i < NVM_SIZE; i++) {
10593 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
10594 			return 1;
10595 		checksum += eeprom_data;
10596 	}
10597 
10598 	if (checksum != (uint16_t) NVM_CHECKSUM) {
10599 #ifdef WM_DEBUG
10600 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10601 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10602 #endif
10603 	}
10604 
10605 	return 0;
10606 }
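
/*
 * Worked example (illustrative): with NVM_CHECKSUM 0xBABA, an image
 * whose first NVM_SIZE - 1 words sum to 0x1234 must carry
 * 0xBABA - 0x1234 = 0xa886 in its checksum word so that the 16-bit
 * sum over all NVM_SIZE words equals NVM_CHECKSUM.
 */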
10607 
10608 static void
10609 wm_nvm_version_invm(struct wm_softc *sc)
10610 {
10611 	uint32_t dword;
10612 
10613 	/*
10614 	 * Linux's code to decode the version is very strange, so we don't
10615 	 * follow that algorithm and instead just decode word 61 as the
10616 	 * documentation describes.  Perhaps it's not perfect, though...
10617 	 *
10618 	 * Example:
10619 	 *
10620 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10621 	 */
10622 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10623 	dword = __SHIFTOUT(dword, INVM_VER_1);
10624 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10625 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10626 }
10627 
10628 static void
10629 wm_nvm_version(struct wm_softc *sc)
10630 {
10631 	uint16_t major, minor, build, patch;
10632 	uint16_t uid0, uid1;
10633 	uint16_t nvm_data;
10634 	uint16_t off;
10635 	bool check_version = false;
10636 	bool check_optionrom = false;
10637 	bool have_build = false;
10638 
10639 	/*
10640 	 * Version format:
10641 	 *
10642 	 * XYYZ
10643 	 * X0YZ
10644 	 * X0YY
10645 	 *
10646 	 * Example:
10647 	 *
10648 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
10649 	 *	82571	0x50a6	5.10.6?
10650 	 *	82572	0x506a	5.6.10?
10651 	 *	82572EI	0x5069	5.6.9?
10652 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
10653 	 *		0x2013	2.1.3?
10654 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
10655 	 */
10656 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10657 	switch (sc->sc_type) {
10658 	case WM_T_82571:
10659 	case WM_T_82572:
10660 	case WM_T_82574:
10661 	case WM_T_82583:
10662 		check_version = true;
10663 		check_optionrom = true;
10664 		have_build = true;
10665 		break;
10666 	case WM_T_82575:
10667 	case WM_T_82576:
10668 	case WM_T_82580:
10669 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10670 			check_version = true;
10671 		break;
10672 	case WM_T_I211:
10673 		wm_nvm_version_invm(sc);
10674 		goto printver;
10675 	case WM_T_I210:
10676 		if (!wm_nvm_get_flash_presence_i210(sc)) {
10677 			wm_nvm_version_invm(sc);
10678 			goto printver;
10679 		}
10680 		/* FALLTHROUGH */
10681 	case WM_T_I350:
10682 	case WM_T_I354:
10683 		check_version = true;
10684 		check_optionrom = true;
10685 		break;
10686 	default:
10687 		return;
10688 	}
10689 	if (check_version) {
10690 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10691 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10692 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10693 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10694 			build = nvm_data & NVM_BUILD_MASK;
10695 			have_build = true;
10696 		} else
10697 			minor = nvm_data & 0x00ff;
10698 
10699 		/* Decimal */
10700 		minor = (minor / 16) * 10 + (minor % 16);
10701 		sc->sc_nvm_ver_major = major;
10702 		sc->sc_nvm_ver_minor = minor;
10703 
10704 printver:
10705 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10706 		    sc->sc_nvm_ver_minor);
10707 		if (have_build) {
10708 			sc->sc_nvm_ver_build = build;
10709 			aprint_verbose(".%d", build);
10710 		}
10711 	}
10712 	if (check_optionrom) {
10713 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10714 		/* Option ROM Version */
10715 		if ((off != 0x0000) && (off != 0xffff)) {
10716 			off += NVM_COMBO_VER_OFF;
10717 			wm_nvm_read(sc, off + 1, 1, &uid1);
10718 			wm_nvm_read(sc, off, 1, &uid0);
10719 			if ((uid0 != 0) && (uid0 != 0xffff)
10720 			    && (uid1 != 0) && (uid1 != 0xffff)) {
10721 				/* 16bits */
10722 				major = uid0 >> 8;
10723 				build = (uid0 << 8) | (uid1 >> 8);
10724 				patch = uid1 & 0x00ff;
10725 				aprint_verbose(", option ROM Version %d.%d.%d",
10726 				    major, build, patch);
10727 			}
10728 		}
10729 	}
10730 
10731 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10732 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10733 }
10734 
10735 /*
10736  * wm_nvm_read:
10737  *
10738  *	Read data from the serial EEPROM.
10739  */
10740 static int
10741 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10742 {
10743 	int rv;
10744 
10745 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
10746 		return 1;
10747 
10748 	if (wm_nvm_acquire(sc))
10749 		return 1;
10750 
10751 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10752 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10753 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10754 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10755 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
10756 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10757 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10758 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10759 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
10760 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10761 	else
10762 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10763 
10764 	wm_nvm_release(sc);
10765 	return rv;
10766 }
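
/*
 * Usage sketch (illustrative; assumes an sc in scope): a typical
 * caller reads a few words and checks the return value.  The
 * acquire, dispatch and release all happen inside wm_nvm_read().
 */
#if 0
	uint16_t myea[3];

	if (wm_nvm_read(sc, NVM_OFF_MACADDR, 3, myea) == 0) {
		/* myea[] now holds the three station address words */
	}
#endif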
10767 
10768 /*
10769  * Hardware semaphores.
10770  * Very complex...
10771  */
10772 
10773 static int
10774 wm_get_swsm_semaphore(struct wm_softc *sc)
10775 {
10776 	int32_t timeout;
10777 	uint32_t swsm;
10778 
10779 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
10780 		/* Get the SW semaphore. */
10781 		timeout = sc->sc_nvm_wordsize + 1;
10782 		while (timeout) {
10783 			swsm = CSR_READ(sc, WMREG_SWSM);
10784 
10785 			if ((swsm & SWSM_SMBI) == 0)
10786 				break;
10787 
10788 			delay(50);
10789 			timeout--;
10790 		}
10791 
10792 		if (timeout == 0) {
10793 			aprint_error_dev(sc->sc_dev,
10794 			    "could not acquire SWSM SMBI\n");
10795 			return 1;
10796 		}
10797 	}
10798 
10799 	/* Get the FW semaphore. */
10800 	timeout = sc->sc_nvm_wordsize + 1;
10801 	while (timeout) {
10802 		swsm = CSR_READ(sc, WMREG_SWSM);
10803 		swsm |= SWSM_SWESMBI;
10804 		CSR_WRITE(sc, WMREG_SWSM, swsm);
10805 		/* If we managed to set the bit, we got the semaphore. */
10806 		swsm = CSR_READ(sc, WMREG_SWSM);
10807 		if (swsm & SWSM_SWESMBI)
10808 			break;
10809 
10810 		delay(50);
10811 		timeout--;
10812 	}
10813 
10814 	if (timeout == 0) {
10815 		aprint_error_dev(sc->sc_dev,
10816 		    "could not acquire SWSM SWESMBI\n");
10817 		/* Release semaphores */
10818 		wm_put_swsm_semaphore(sc);
10819 		return 1;
10820 	}
10821 	return 0;
10822 }
10823 
10824 static void
10825 wm_put_swsm_semaphore(struct wm_softc *sc)
10826 {
10827 	uint32_t swsm;
10828 
10829 	swsm = CSR_READ(sc, WMREG_SWSM);
10830 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
10831 	CSR_WRITE(sc, WMREG_SWSM, swsm);
10832 }
10833 
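/*
 * wm_get_swfw_semaphore:
 *
 *	Claim a per-resource lock in the SW_FW_SYNC register, which is
 *	itself protected by the SWSM semaphore above.  A resource is
 *	free when neither its software bit (mask << SWFW_SOFT_SHIFT)
 *	nor its firmware bit (mask << SWFW_FIRM_SHIFT) is set.
 */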
10834 static int
10835 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10836 {
10837 	uint32_t swfw_sync;
10838 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
10839 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
10840 	int timeout;
10841 
10842 	for (timeout = 0; timeout < 200; timeout++) {
10843 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
10844 			if (wm_get_swsm_semaphore(sc)) {
10845 				aprint_error_dev(sc->sc_dev,
10846 				    "%s: failed to get semaphore\n",
10847 				    __func__);
10848 				return 1;
10849 			}
10850 		}
10851 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10852 		if ((swfw_sync & (swmask | fwmask)) == 0) {
10853 			swfw_sync |= swmask;
10854 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10855 			if (sc->sc_flags & WM_F_LOCK_SWSM)
10856 				wm_put_swsm_semaphore(sc);
10857 			return 0;
10858 		}
10859 		if (sc->sc_flags & WM_F_LOCK_SWSM)
10860 			wm_put_swsm_semaphore(sc);
10861 		delay(5000);
10862 	}
10863 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
10864 	    device_xname(sc->sc_dev), mask, swfw_sync);
10865 	return 1;
10866 }
10867 
10868 static void
10869 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
10870 {
10871 	uint32_t swfw_sync;
10872 
10873 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
10874 		while (wm_get_swsm_semaphore(sc) != 0)
10875 			continue;
10876 	}
10877 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
10878 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
10879 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
10880 	if (sc->sc_flags & WM_F_LOCK_SWSM)
10881 		wm_put_swsm_semaphore(sc);
10882 }
10883 
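/*
 * wm_get_swfwhw_semaphore:
 *
 *	ICH8-style ownership flag: set EXTCNFCTR_MDIO_SW_OWNERSHIP and
 *	read it back; the bit stays set only while software owns the
 *	MDIO/NVM resources.
 */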
10884 static int
10885 wm_get_swfwhw_semaphore(struct wm_softc *sc)
10886 {
10887 	uint32_t ext_ctrl;
10888 	int timeout;
10889 
10890 	for (timeout = 0; timeout < 200; timeout++) {
10891 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10892 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
10893 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10894 
10895 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10896 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
10897 			return 0;
10898 		delay(5000);
10899 	}
10900 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
10901 	    device_xname(sc->sc_dev), ext_ctrl);
10902 	return 1;
10903 }
10904 
10905 static void
10906 wm_put_swfwhw_semaphore(struct wm_softc *sc)
10907 {
10908 	uint32_t ext_ctrl;
10909 
10910 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
10911 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10912 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
10913 }
10914 
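/*
 * wm_get_hw_semaphore_82573:
 *
 *	The same EXTCNFCTR MDIO ownership bit as above, polled with a
 *	bounded retry count (WM_MDIO_OWNERSHIP_TIMEOUT) for 82573-class
 *	chips.
 */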
10915 static int
10916 wm_get_hw_semaphore_82573(struct wm_softc *sc)
10917 {
10918 	int i = 0;
10919 	uint32_t reg;
10920 
10921 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10922 	do {
10923 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
10924 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
10925 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10926 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
10927 			break;
10928 		delay(2*1000);
10929 		i++;
10930 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
10931 
10932 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
10933 		wm_put_hw_semaphore_82573(sc);
10934 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
10935 		    device_xname(sc->sc_dev));
10936 		return -1;
10937 	}
10938 
10939 	return 0;
10940 }
10941 
10942 static void
10943 wm_put_hw_semaphore_82573(struct wm_softc *sc)
10944 {
10945 	uint32_t reg;
10946 
10947 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
10948 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
10949 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
10950 }
10951 
10952 /*
10953  * Management mode and power management related subroutines.
10954  * BMC, AMT, suspend/resume and EEE.
10955  */
10956 
10957 #ifdef WM_WOL
10958 static int
10959 wm_check_mng_mode(struct wm_softc *sc)
10960 {
10961 	int rv;
10962 
10963 	switch (sc->sc_type) {
10964 	case WM_T_ICH8:
10965 	case WM_T_ICH9:
10966 	case WM_T_ICH10:
10967 	case WM_T_PCH:
10968 	case WM_T_PCH2:
10969 	case WM_T_PCH_LPT:
10970 		rv = wm_check_mng_mode_ich8lan(sc);
10971 		break;
10972 	case WM_T_82574:
10973 	case WM_T_82583:
10974 		rv = wm_check_mng_mode_82574(sc);
10975 		break;
10976 	case WM_T_82571:
10977 	case WM_T_82572:
10978 	case WM_T_82573:
10979 	case WM_T_80003:
10980 		rv = wm_check_mng_mode_generic(sc);
10981 		break;
10982 	default:
10983 		/* nothing to do */
10984 		rv = 0;
10985 		break;
10986 	}
10987 
10988 	return rv;
10989 }
10990 
10991 static int
10992 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
10993 {
10994 	uint32_t fwsm;
10995 
10996 	fwsm = CSR_READ(sc, WMREG_FWSM);
10997 
10998 	if (((fwsm & FWSM_FW_VALID) != 0)
10999 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11000 		return 1;
11001 
11002 	return 0;
11003 }
11004 
11005 static int
11006 wm_check_mng_mode_82574(struct wm_softc *sc)
11007 {
11008 	uint16_t data;
11009 
11010 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11011 
11012 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11013 		return 1;
11014 
11015 	return 0;
11016 }
11017 
11018 static int
11019 wm_check_mng_mode_generic(struct wm_softc *sc)
11020 {
11021 	uint32_t fwsm;
11022 
11023 	fwsm = CSR_READ(sc, WMREG_FWSM);
11024 
11025 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11026 		return 1;
11027 
11028 	return 0;
11029 }
11030 #endif /* WM_WOL */
11031 
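/*
 * wm_enable_mng_pass_thru:
 *
 *	Decide whether management packets should be passed through to
 *	the host: requires ASF firmware and TCO packet reception in
 *	MANC, plus a pass-through indication from FWSM/FACTPS, from
 *	the NVM CFG2 word on 82574/82583, or from the MANC SMBus/ASF
 *	bits.  Returns non-zero when enabled.
 */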
11032 static int
11033 wm_enable_mng_pass_thru(struct wm_softc *sc)
11034 {
11035 	uint32_t manc, fwsm, factps;
11036 
11037 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11038 		return 0;
11039 
11040 	manc = CSR_READ(sc, WMREG_MANC);
11041 
11042 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11043 		device_xname(sc->sc_dev), manc));
11044 	if ((manc & MANC_RECV_TCO_EN) == 0)
11045 		return 0;
11046 
11047 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11048 		fwsm = CSR_READ(sc, WMREG_FWSM);
11049 		factps = CSR_READ(sc, WMREG_FACTPS);
11050 		if (((factps & FACTPS_MNGCG) == 0)
11051 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11052 			return 1;
11053 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)) {
11054 		uint16_t data;
11055 
11056 		factps = CSR_READ(sc, WMREG_FACTPS);
11057 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11058 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11059 			device_xname(sc->sc_dev), factps, data));
11060 		if (((factps & FACTPS_MNGCG) == 0)
11061 		    && ((data & NVM_CFG2_MNGM_MASK)
11062 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11063 			return 1;
11064 	} else if (((manc & MANC_SMBUS_EN) != 0)
11065 	    && ((manc & MANC_ASF_EN) == 0))
11066 		return 1;
11067 
11068 	return 0;
11069 }
11070 
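/*
 * wm_phy_resetisblocked:
 *
 *	Return true while firmware forbids a PHY reset: on ICH/PCH a
 *	clear FWSM_RSPCIPHY bit blocks the reset (polled up to ten
 *	times), on 8257x/80003 the MANC_BLK_PHY_RST_ON_IDE bit does.
 */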
11071 static bool
11072 wm_phy_resetisblocked(struct wm_softc *sc)
11073 {
11074 	bool blocked = false;
11075 	uint32_t reg;
11076 	int i = 0;
11077 
11078 	switch (sc->sc_type) {
11079 	case WM_T_ICH8:
11080 	case WM_T_ICH9:
11081 	case WM_T_ICH10:
11082 	case WM_T_PCH:
11083 	case WM_T_PCH2:
11084 	case WM_T_PCH_LPT:
11085 		do {
11086 			reg = CSR_READ(sc, WMREG_FWSM);
11087 			if ((reg & FWSM_RSPCIPHY) == 0) {
11088 				blocked = true;
11089 				delay(10*1000);
11090 				continue;
11091 			}
11092 			blocked = false;
11093 		} while (blocked && (i++ < 10));
11094 		return blocked;
11096 	case WM_T_82571:
11097 	case WM_T_82572:
11098 	case WM_T_82573:
11099 	case WM_T_82574:
11100 	case WM_T_82583:
11101 	case WM_T_80003:
11102 		reg = CSR_READ(sc, WMREG_MANC);
11103 		return ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0);
11108 	default:
11109 		/* no problem */
11110 		break;
11111 	}
11112 
11113 	return false;
11114 }
11115 
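/*
 * wm_get_hw_control:
 *
 *	Set the DRV_LOAD bit (in SWSM on the 82573, in CTRL_EXT
 *	elsewhere) to tell the firmware that the driver now owns the
 *	hardware; wm_release_hw_control() clears it again.
 */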
11116 static void
11117 wm_get_hw_control(struct wm_softc *sc)
11118 {
11119 	uint32_t reg;
11120 
11121 	switch (sc->sc_type) {
11122 	case WM_T_82573:
11123 		reg = CSR_READ(sc, WMREG_SWSM);
11124 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11125 		break;
11126 	case WM_T_82571:
11127 	case WM_T_82572:
11128 	case WM_T_82574:
11129 	case WM_T_82583:
11130 	case WM_T_80003:
11131 	case WM_T_ICH8:
11132 	case WM_T_ICH9:
11133 	case WM_T_ICH10:
11134 	case WM_T_PCH:
11135 	case WM_T_PCH2:
11136 	case WM_T_PCH_LPT:
11137 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11138 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11139 		break;
11140 	default:
11141 		break;
11142 	}
11143 }
11144 
11145 static void
11146 wm_release_hw_control(struct wm_softc *sc)
11147 {
11148 	uint32_t reg;
11149 
11150 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11151 		return;
11152 
11153 	if (sc->sc_type == WM_T_82573) {
11154 		reg = CSR_READ(sc, WMREG_SWSM);
11155 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
11157 	} else {
11158 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11159 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11160 	}
11161 }
11162 
11163 static void
11164 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, int on)
11165 {
11166 	uint32_t reg;
11167 
11168 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11169 
11170 	if (on != 0)
11171 		reg |= EXTCNFCTR_GATE_PHY_CFG;
11172 	else
11173 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11174 
11175 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11176 }
11177 
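/*
 * wm_smbustopci:
 *
 *	When no valid firmware owns the PHY and a reset is not blocked,
 *	pulse the CTRL_LANPHYPC override/value bits to switch the PHY
 *	from the SMBus interface back to PCIe mode.
 */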
11178 static void
11179 wm_smbustopci(struct wm_softc *sc)
11180 {
11181 	uint32_t fwsm;
11182 
11183 	fwsm = CSR_READ(sc, WMREG_FWSM);
11184 	if (((fwsm & FWSM_FW_VALID) == 0)
11185 	    && ((wm_phy_resetisblocked(sc) == false))) {
11186 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11187 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11188 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11189 		CSR_WRITE_FLUSH(sc);
11190 		delay(10);
11191 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11192 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11193 		CSR_WRITE_FLUSH(sc);
11194 		delay(50*1000);
11195 
11196 		/*
11197 		 * Gate automatic PHY configuration by hardware on non-managed
11198 		 * 82579
11199 		 */
11200 		if (sc->sc_type == WM_T_PCH2)
11201 			wm_gate_hw_phy_config_ich8lan(sc, 1);
11202 	}
11203 }
11204 
11205 static void
11206 wm_init_manageability(struct wm_softc *sc)
11207 {
11208 
11209 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11210 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11211 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11212 
11213 		/* Disable hardware interception of ARP */
11214 		manc &= ~MANC_ARP_EN;
11215 
11216 		/* Enable receiving management packets to the host */
11217 		if (sc->sc_type >= WM_T_82571) {
11218 			manc |= MANC_EN_MNG2HOST;
11219 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11220 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11221 		}
11222 
11223 		CSR_WRITE(sc, WMREG_MANC, manc);
11224 	}
11225 }
11226 
11227 static void
11228 wm_release_manageability(struct wm_softc *sc)
11229 {
11230 
11231 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11232 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11233 
11234 		manc |= MANC_ARP_EN;
11235 		if (sc->sc_type >= WM_T_82571)
11236 			manc &= ~MANC_EN_MNG2HOST;
11237 
11238 		CSR_WRITE(sc, WMREG_MANC, manc);
11239 	}
11240 }
11241 
11242 static void
11243 wm_get_wakeup(struct wm_softc *sc)
11244 {
11245 
11246 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11247 	switch (sc->sc_type) {
11248 	case WM_T_82573:
11249 	case WM_T_82583:
11250 		sc->sc_flags |= WM_F_HAS_AMT;
11251 		/* FALLTHROUGH */
11252 	case WM_T_80003:
11253 	case WM_T_82541:
11254 	case WM_T_82547:
11255 	case WM_T_82571:
11256 	case WM_T_82572:
11257 	case WM_T_82574:
11258 	case WM_T_82575:
11259 	case WM_T_82576:
11260 	case WM_T_82580:
11261 	case WM_T_I350:
11262 	case WM_T_I354:
11263 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11264 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11265 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11266 		break;
11267 	case WM_T_ICH8:
11268 	case WM_T_ICH9:
11269 	case WM_T_ICH10:
11270 	case WM_T_PCH:
11271 	case WM_T_PCH2:
11272 	case WM_T_PCH_LPT:
11273 		sc->sc_flags |= WM_F_HAS_AMT;
11274 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11275 		break;
11276 	default:
11277 		break;
11278 	}
11279 
11280 	/* 1: HAS_MANAGE */
11281 	if (wm_enable_mng_pass_thru(sc) != 0)
11282 		sc->sc_flags |= WM_F_HAS_MANAGE;
11283 
11284 #ifdef WM_DEBUG
11285 	printf("\n");
11286 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11287 		printf("HAS_AMT,");
11288 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11289 		printf("ARC_SUBSYS_VALID,");
11290 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11291 		printf("ASF_FIRMWARE_PRES,");
11292 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11293 		printf("HAS_MANAGE,");
11294 	printf("\n");
11295 #endif
11296 	/*
11297 	 * Note that the WOL flag is set later, after the EEPROM-related
11298 	 * reset code has run.
11299 	 */
11300 }
11301 
11302 #ifdef WM_WOL
11303 /* WOL in the newer chipset interfaces (pchlan) */
11304 static void
11305 wm_enable_phy_wakeup(struct wm_softc *sc)
11306 {
11307 #if 0
11308 	uint16_t preg;
11309 
11310 	/* Copy MAC RARs to PHY RARs */
11311 
11312 	/* Copy MAC MTA to PHY MTA */
11313 
11314 	/* Configure PHY Rx Control register */
11315 
11316 	/* Enable PHY wakeup in MAC register */
11317 
11318 	/* Configure and enable PHY wakeup in PHY registers */
11319 
11320 	/* Activate PHY wakeup */
11321 
11322 	/* XXX */
11323 #endif
11324 }
11325 
11326 /* Power down workaround on D3 */
11327 static void
11328 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11329 {
11330 	uint32_t reg;
11331 	int i;
11332 
11333 	for (i = 0; i < 2; i++) {
11334 		/* Disable link */
11335 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11336 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11337 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11338 
11339 		/*
11340 		 * Call gig speed drop workaround on Gig disable before
11341 		 * accessing any PHY registers
11342 		 */
11343 		if (sc->sc_type == WM_T_ICH8)
11344 			wm_gig_downshift_workaround_ich8lan(sc);
11345 
11346 		/* Write VR power-down enable */
11347 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11348 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11349 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11350 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11351 
11352 		/* Read it back and test */
11353 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11354 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11355 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11356 			break;
11357 
11358 		/* Issue PHY reset and repeat at most one more time */
11359 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11360 	}
11361 }
11362 
11363 static void
11364 wm_enable_wakeup(struct wm_softc *sc)
11365 {
11366 	uint32_t reg, pmreg;
11367 	pcireg_t pmode;
11368 
11369 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11370 		&pmreg, NULL) == 0)
11371 		return;
11372 
11373 	/* Advertise the wakeup capability */
11374 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11375 	    | CTRL_SWDPIN(3));
11376 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11377 
11378 	/* ICH workaround */
11379 	switch (sc->sc_type) {
11380 	case WM_T_ICH8:
11381 	case WM_T_ICH9:
11382 	case WM_T_ICH10:
11383 	case WM_T_PCH:
11384 	case WM_T_PCH2:
11385 	case WM_T_PCH_LPT:
11386 		/* Disable gig during WOL */
11387 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11388 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11389 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11390 		if (sc->sc_type == WM_T_PCH)
11391 			wm_gmii_reset(sc);
11392 
11393 		/* Power down workaround */
11394 		if (sc->sc_phytype == WMPHY_82577) {
11395 			struct mii_softc *child;
11396 
11397 			/* Assume that the PHY is copper */
11398 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11399 			if (child->mii_mpd_rev <= 2)
11400 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11401 				    (768 << 5) | 25, 0x0444); /* magic num */
11402 		}
11403 		break;
11404 	default:
11405 		break;
11406 	}
11407 
11408 	/* Keep the laser running on fiber adapters */
11409 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11410 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11411 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11412 		reg |= CTRL_EXT_SWDPIN(3);
11413 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11414 	}
11415 
11416 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11417 #if 0	/* for the multicast packet */
11418 	reg |= WUFC_MC;
11419 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11420 #endif
11421 
11422 	if (sc->sc_type == WM_T_PCH) {
11423 		wm_enable_phy_wakeup(sc);
11424 	} else {
11425 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11426 		CSR_WRITE(sc, WMREG_WUFC, reg);
11427 	}
11428 
11429 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11430 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11431 		|| (sc->sc_type == WM_T_PCH2))
11432 		    && (sc->sc_phytype == WMPHY_IGP_3))
11433 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11434 
11435 	/* Request PME */
11436 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11437 #if 0
11438 	/* Disable WOL */
11439 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11440 #else
11441 	/* For WOL */
11442 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11443 #endif
11444 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11445 }
11446 #endif /* WM_WOL */
11447 
11448 /* LPLU (Low Power Link Up) */
11449 
11450 static void
11451 wm_lplu_d0_disable(struct wm_softc *sc)
11452 {
11453 	uint32_t reg;
11454 
11455 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11456 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11457 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11458 }
11459 
11460 static void
11461 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11462 {
11463 	uint32_t reg;
11464 
11465 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11466 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11467 	reg |= HV_OEM_BITS_ANEGNOW;
11468 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11469 }
11470 
11471 /* EEE (Energy Efficient Ethernet) */
11472 
11473 static void
11474 wm_set_eee_i350(struct wm_softc *sc)
11475 {
11476 	uint32_t ipcnfg, eeer;
11477 
11478 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11479 	eeer = CSR_READ(sc, WMREG_EEER);
11480 
11481 	if ((sc->sc_flags & WM_F_EEE) != 0) {
11482 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11483 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11484 		    | EEER_LPI_FC);
11485 	} else {
11486 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11487 		ipcnfg &= ~IPCNFG_10BASE_TE;
11488 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11489 		    | EEER_LPI_FC);
11490 	}
11491 
11492 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11493 	CSR_WRITE(sc, WMREG_EEER, eeer);
11494 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11495 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11496 }
11497 
11498 /*
11499  * Workarounds (mainly PHY related).
11500  * Basically, PHY's workarounds are in the PHY drivers.
11501  */
11502 
11503 /* Work-around for 82566 Kumeran PCS lock loss */
11504 static void
11505 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11506 {
11507 #if 0
11508 	int miistatus, active, i;
11509 	int reg;
11510 
11511 	miistatus = sc->sc_mii.mii_media_status;
11512 
11513 	/* If the link is not up, do nothing */
11514 	if ((miistatus & IFM_ACTIVE) == 0)
11515 		return;
11516 
11517 	active = sc->sc_mii.mii_media_active;
11518 
11519 	/* Nothing to do if the link is other than 1Gbps */
11520 	if (IFM_SUBTYPE(active) != IFM_1000_T)
11521 		return;
11522 
11523 	for (i = 0; i < 10; i++) {
11524 		/* read twice */
11525 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11526 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11527 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11528 			goto out;	/* GOOD! */
11529 
11530 		/* Reset the PHY */
11531 		wm_gmii_reset(sc);
11532 		delay(5*1000);
11533 	}
11534 
11535 	/* Disable GigE link negotiation */
11536 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11537 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11538 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11539 
11540 	/*
11541 	 * Call gig speed drop workaround on Gig disable before accessing
11542 	 * any PHY registers.
11543 	 */
11544 	wm_gig_downshift_workaround_ich8lan(sc);
11545 
11546 out:
11547 	return;
11548 #endif
11549 }
11550 
11551 /* Workaround for "WOL from S5 stops working" (gig downshift) */
11552 static void
11553 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11554 {
11555 	uint16_t kmrn_reg;
11556 
11557 	/* Only for igp3 */
11558 	if (sc->sc_phytype == WMPHY_IGP_3) {
11559 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11560 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11561 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11562 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11563 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11564 	}
11565 }
11566 
11567 /*
11568  * Workarounds for the PCH PHYs
11569  * XXX should be moved to new PHY driver?
11570  */
11571 static void
11572 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11573 {
11574 	if (sc->sc_phytype == WMPHY_82577)
11575 		wm_set_mdio_slow_mode_hv(sc);
11576 
11577 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11578 
11579 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11580 
11581 	/* 82578 */
11582 	if (sc->sc_phytype == WMPHY_82578) {
11583 		/* PCH rev. < 3 */
11584 		if (sc->sc_rev < 3) {
11585 			/* XXX 6 bit shift? Why? Is it page2? */
11586 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11587 			    0x66c0);
11588 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11589 			    0xffff);
11590 		}
11591 
11592 		/* XXX phy rev. < 2 */
11593 	}
11594 
11595 	/* Select page 0 */
11596 
11597 	/* XXX acquire semaphore */
11598 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11599 	/* XXX release semaphore */
11600 
11601 	/*
11602 	 * Configure the K1 Si workaround during phy reset assuming there is
11603 	 * link so that it disables K1 if link is in 1Gbps.
11604 	 */
11605 	wm_k1_gig_workaround_hv(sc, 1);
11606 }
11607 
11608 static void
11609 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11610 {
11611 
11612 	wm_set_mdio_slow_mode_hv(sc);
11613 }
11614 
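/*
 * wm_k1_gig_workaround_hv:
 *
 *	K1 must not be used while the link runs at 1Gbps, so with link
 *	up the NVM default is overridden (K1 off) and the IGP3_KMRN_DIAG
 *	link-stall fix is written for the new link state.
 */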
11615 static void
11616 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11617 {
11618 	int k1_enable = sc->sc_nvm_k1_enabled;
11619 
11620 	/* XXX acquire semaphore */
11621 
11622 	if (link) {
11623 		k1_enable = 0;
11624 
11625 		/* Link stall fix for link up */
11626 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11627 	} else {
11628 		/* Link stall fix for link down */
11629 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11630 	}
11631 
11632 	wm_configure_k1_ich8lan(sc, k1_enable);
11633 
11634 	/* XXX release semaphore */
11635 }
11636 
11637 static void
11638 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11639 {
11640 	uint32_t reg;
11641 
11642 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11643 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11644 	    reg | HV_KMRN_MDIO_SLOW);
11645 }
11646 
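/*
 * wm_configure_k1_ich8lan:
 *
 *	Set or clear the K1 enable bit through the Kumeran K1_CONFIG
 *	register, then briefly force the MAC speed (CTRL_FRCSPD plus
 *	CTRL_EXT_SPD_BYPS) so the new K1 setting takes effect.
 */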
11647 static void
11648 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11649 {
11650 	uint32_t ctrl, ctrl_ext, tmp;
11651 	uint16_t kmrn_reg;
11652 
11653 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11654 
11655 	if (k1_enable)
11656 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11657 	else
11658 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11659 
11660 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11661 
11662 	delay(20);
11663 
11664 	ctrl = CSR_READ(sc, WMREG_CTRL);
11665 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11666 
11667 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11668 	tmp |= CTRL_FRCSPD;
11669 
11670 	CSR_WRITE(sc, WMREG_CTRL, tmp);
11671 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11672 	CSR_WRITE_FLUSH(sc);
11673 	delay(20);
11674 
11675 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
11676 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11677 	CSR_WRITE_FLUSH(sc);
11678 	delay(20);
11679 }
11680 
11681 /* Special case - the 82575 needs manual init after a reset ... */
11682 static void
11683 wm_reset_init_script_82575(struct wm_softc *sc)
11684 {
11685 	/*
11686 	 * Remark: this is untested code - we have no board without an EEPROM.
11687 	 * Same setup as mentioned in the FreeBSD driver for the i82575.
11688 	 */
11689 
11690 	/* SerDes configuration via SERDESCTRL */
11691 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11692 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11693 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11694 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11695 
11696 	/* CCM configuration via CCMCTL register */
11697 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11698 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11699 
11700 	/* PCIe lanes configuration */
11701 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11702 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11703 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11704 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11705 
11706 	/* PCIe PLL Configuration */
11707 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11708 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11709 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
11710 }
11711 
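/*
 * wm_reset_mdicnfg_82580:
 *
 *	For 82580 ports in SGMII mode, re-derive the MDICNFG external
 *	and common MDIO bits from the port's CFG3_PORTA word in the NVM.
 */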
11712 static void
11713 wm_reset_mdicnfg_82580(struct wm_softc *sc)
11714 {
11715 	uint32_t reg;
11716 	uint16_t nvmword;
11717 	int rv;
11718 
11719 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11720 		return;
11721 
11722 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
11723 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
11724 	if (rv != 0) {
11725 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
11726 		    __func__);
11727 		return;
11728 	}
11729 
11730 	reg = CSR_READ(sc, WMREG_MDICNFG);
11731 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
11732 		reg |= MDICNFG_DEST;
11733 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
11734 		reg |= MDICNFG_COM_MDIO;
11735 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11736 }
11737 
11738 /*
11739  * I210 Errata 25 and I211 Errata 10
11740  * Slow System Clock.
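 *
 * If the PHY PLL frequency register reads back as unconfigured, the
 * internal PHY missed its clock: the workaround resets the PHY,
 * temporarily overrides the iNVM autoload word with INVM_PLL_WO_VAL
 * via EEARBC, and bounces the function through D3hot so the PLL can
 * relock, retrying up to WM_MAX_PLL_TRIES times.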
11741  */
11742 static void
11743 wm_pll_workaround_i210(struct wm_softc *sc)
11744 {
11745 	uint32_t mdicnfg, wuc;
11746 	uint32_t reg;
11747 	pcireg_t pcireg;
11748 	uint32_t pmreg;
11749 	uint16_t nvmword, tmp_nvmword;
11750 	int phyval;
11751 	bool wa_done = false;
11752 	int i;
11753 
11754 	/* Save WUC and MDICNFG registers */
11755 	wuc = CSR_READ(sc, WMREG_WUC);
11756 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
11757 
11758 	reg = mdicnfg & ~MDICNFG_DEST;
11759 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11760 
11761 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
11762 		nvmword = INVM_DEFAULT_AL;
11763 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
11764 
11765 	/* Get Power Management cap offset */
11766 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11767 		&pmreg, NULL) == 0)
11768 		return;
11769 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
11770 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
11771 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
11772 
11773 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
11774 			break; /* OK */
11775 		}
11776 
11777 		wa_done = true;
11778 		/* Directly reset the internal PHY */
11779 		reg = CSR_READ(sc, WMREG_CTRL);
11780 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
11781 
11782 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11783 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
11784 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11785 
11786 		CSR_WRITE(sc, WMREG_WUC, 0);
11787 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
11788 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11789 
11790 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
11791 		    pmreg + PCI_PMCSR);
11792 		pcireg |= PCI_PMCSR_STATE_D3;
11793 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11794 		    pmreg + PCI_PMCSR, pcireg);
11795 		delay(1000);
11796 		pcireg &= ~PCI_PMCSR_STATE_D3;
11797 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
11798 		    pmreg + PCI_PMCSR, pcireg);
11799 
11800 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
11801 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
11802 
11803 		/* Restore WUC register */
11804 		CSR_WRITE(sc, WMREG_WUC, wuc);
11805 	}
11806 
11807 	/* Restore MDICNFG setting */
11808 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
11809 	if (wa_done)
11810 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
11811 }
11812