xref: /netbsd-src/sys/dev/pci/if_wm.c (revision fdd524d4ccd2bb0c6f67401e938dabf773eb0372)
1 /*	$NetBSD: if_wm.c,v 1.416 2016/07/11 06:14:51 knakahara Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
77  *	- TX Multi queue improvement (refine queue selection logic)
78  *	- Advanced Receive Descriptor
79  *	- EEE (Energy Efficiency Ethernet)
80  *	- Virtual Function
81  *	- Set LED correctly (based on contents in EEPROM)
82  *	- Rework how parameters are loaded from the EEPROM.
83  *	- Image Unique ID
84  *	- restructure evcnt
85  */
86 
87 #include <sys/cdefs.h>
88 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.416 2016/07/11 06:14:51 knakahara Exp $");
89 
90 #ifdef _KERNEL_OPT
91 #include "opt_net_mpsafe.h"
92 #endif
93 
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/callout.h>
97 #include <sys/mbuf.h>
98 #include <sys/malloc.h>
99 #include <sys/kmem.h>
100 #include <sys/kernel.h>
101 #include <sys/socket.h>
102 #include <sys/ioctl.h>
103 #include <sys/errno.h>
104 #include <sys/device.h>
105 #include <sys/queue.h>
106 #include <sys/syslog.h>
107 #include <sys/interrupt.h>
108 #include <sys/cpu.h>
109 #include <sys/pcq.h>
110 
111 #include <sys/rndsource.h>
112 
113 #include <net/if.h>
114 #include <net/if_dl.h>
115 #include <net/if_media.h>
116 #include <net/if_ether.h>
117 
118 #include <net/bpf.h>
119 
120 #include <netinet/in.h>			/* XXX for struct ip */
121 #include <netinet/in_systm.h>		/* XXX for struct ip */
122 #include <netinet/ip.h>			/* XXX for struct ip */
123 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
124 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
125 
126 #include <sys/bus.h>
127 #include <sys/intr.h>
128 #include <machine/endian.h>
129 
130 #include <dev/mii/mii.h>
131 #include <dev/mii/miivar.h>
132 #include <dev/mii/miidevs.h>
133 #include <dev/mii/mii_bitbang.h>
134 #include <dev/mii/ikphyreg.h>
135 #include <dev/mii/igphyreg.h>
136 #include <dev/mii/igphyvar.h>
137 #include <dev/mii/inbmphyreg.h>
138 
139 #include <dev/pci/pcireg.h>
140 #include <dev/pci/pcivar.h>
141 #include <dev/pci/pcidevs.h>
142 
143 #include <dev/pci/if_wmreg.h>
144 #include <dev/pci/if_wmvar.h>
145 
146 #ifdef WM_DEBUG
147 #define	WM_DEBUG_LINK		0x01
148 #define	WM_DEBUG_TX		0x02
149 #define	WM_DEBUG_RX		0x04
150 #define	WM_DEBUG_GMII		0x08
151 #define	WM_DEBUG_MANAGE		0x10
152 #define	WM_DEBUG_NVM		0x20
153 #define	WM_DEBUG_INIT		0x40
154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT;
156 
157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
158 #else
159 #define	DPRINTF(x, y)	/* nothing */
160 #endif /* WM_DEBUG */
161 
162 #ifdef NET_MPSAFE
163 #define WM_MPSAFE	1
164 #endif
165 
166 /*
167  * This device driver's max interrupt numbers.
168  */
169 #define WM_MAX_NQUEUEINTR	16
170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
171 
172 /*
173  * Transmit descriptor list size.  Due to errata, we can only have
174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
175  * on >= 82544.  We tell the upper layers that they can queue a lot
176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
177  * of them at a time.
178  *
179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
180  * chains containing many small mbufs have been observed in zero-copy
181  * situations with jumbo frames.
182  */
183 #define	WM_NTXSEGS		256
184 #define	WM_IFQUEUELEN		256
185 #define	WM_TXQUEUELEN_MAX	64
186 #define	WM_TXQUEUELEN_MAX_82547	16
187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
190 #define	WM_NTXDESC_82542	256
191 #define	WM_NTXDESC_82544	4096
192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
197 
198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
199 
200 #define	WM_TXINTERQSIZE		256
201 
202 /*
203  * Receive descriptor list size.  We have one Rx buffer for normal
204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
205  * packet.  We allocate 256 receive descriptors, each with a 2k
206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
207  */
208 #define	WM_NRXDESC		256
209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
212 
213 typedef union txdescs {
214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
216 } txdescs_t;
217 
218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * x)
220 
221 /*
222  * Software state for transmit jobs.
223  */
224 struct wm_txsoft {
225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
227 	int txs_firstdesc;		/* first descriptor in packet */
228 	int txs_lastdesc;		/* last descriptor in packet */
229 	int txs_ndesc;			/* # of descriptors used */
230 };
231 
232 /*
233  * Software state for receive buffers.  Each descriptor gets a
234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
235  * more than one buffer, we chain them together.
236  */
237 struct wm_rxsoft {
238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
240 };
241 
242 #define WM_LINKUP_TIMEOUT	50
243 
244 static uint16_t swfwphysem[] = {
245 	SWFW_PHY0_SM,
246 	SWFW_PHY1_SM,
247 	SWFW_PHY2_SM,
248 	SWFW_PHY3_SM
249 };
250 
251 static const uint32_t wm_82580_rxpbs_table[] = {
252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
253 };
254 
255 struct wm_softc;
256 
257 struct wm_txqueue {
258 	kmutex_t *txq_lock;		/* lock for tx operations */
259 
260 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
261 
262 	/* Software state for the transmit descriptors. */
263 	int txq_num;			/* must be a power of two */
264 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
265 
266 	/* TX control data structures. */
267 	int txq_ndesc;			/* must be a power of two */
268 	size_t txq_descsize;		/* a tx descriptor size */
269 	txdescs_t *txq_descs_u;
270         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
271 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
272 	int txq_desc_rseg;		/* real number of control segment */
273 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
274 #define	txq_descs	txq_descs_u->sctxu_txdescs
275 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
276 
277 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
278 
279 	int txq_free;			/* number of free Tx descriptors */
280 	int txq_next;			/* next ready Tx descriptor */
281 
282 	int txq_sfree;			/* number of free Tx jobs */
283 	int txq_snext;			/* next free Tx job */
284 	int txq_sdirty;			/* dirty Tx jobs */
285 
286 	/* These 4 variables are used only on the 82547. */
287 	int txq_fifo_size;		/* Tx FIFO size */
288 	int txq_fifo_head;		/* current head of FIFO */
289 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
290 	int txq_fifo_stall;		/* Tx FIFO is stalled */
291 
292 	/*
293 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
294 	 * CPUs. This queue intermediate them without block.
295 	 */
296 	pcq_t *txq_interq;
297 
298 	/*
299 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
300 	 * to manage Tx H/W queue's busy flag.
301 	 */
302 	int txq_flags;			/* flags for H/W queue, see below */
303 #define	WM_TXQ_NO_SPACE	0x1
304 
305 	/* XXX which event counter is required? */
306 };
307 
308 struct wm_rxqueue {
309 	kmutex_t *rxq_lock;		/* lock for rx operations */
310 
311 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
312 
313 	/* Software state for the receive descriptors. */
314 	wiseman_rxdesc_t *rxq_descs;
315 
316 	/* RX control data structures. */
317 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
318 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
319 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
320 	int rxq_desc_rseg;		/* real number of control segment */
321 	size_t rxq_desc_size;		/* control data size */
322 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
323 
324 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
325 
326 	int rxq_ptr;			/* next ready Rx desc/queue ent */
327 	int rxq_discard;
328 	int rxq_len;
329 	struct mbuf *rxq_head;
330 	struct mbuf *rxq_tail;
331 	struct mbuf **rxq_tailp;
332 
333 	/* XXX which event counter is required? */
334 };
335 
336 struct wm_queue {
337 	int wmq_id;			/* index of transmit and receive queues */
338 	int wmq_intr_idx;		/* index of MSI-X tables */
339 
340 	struct wm_txqueue wmq_txq;
341 	struct wm_rxqueue wmq_rxq;
342 };
343 
344 /*
345  * Software state per device.
346  */
347 struct wm_softc {
348 	device_t sc_dev;		/* generic device information */
349 	bus_space_tag_t sc_st;		/* bus space tag */
350 	bus_space_handle_t sc_sh;	/* bus space handle */
351 	bus_size_t sc_ss;		/* bus space size */
352 	bus_space_tag_t sc_iot;		/* I/O space tag */
353 	bus_space_handle_t sc_ioh;	/* I/O space handle */
354 	bus_size_t sc_ios;		/* I/O space size */
355 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
356 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
357 	bus_size_t sc_flashs;		/* flash registers space size */
358 	off_t sc_flashreg_offset;	/*
359 					 * offset to flash registers from
360 					 * start of BAR
361 					 */
362 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
363 
364 	struct ethercom sc_ethercom;	/* ethernet common data */
365 	struct mii_data sc_mii;		/* MII/media information */
366 
367 	pci_chipset_tag_t sc_pc;
368 	pcitag_t sc_pcitag;
369 	int sc_bus_speed;		/* PCI/PCIX bus speed */
370 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
371 
372 	uint16_t sc_pcidevid;		/* PCI device ID */
373 	wm_chip_type sc_type;		/* MAC type */
374 	int sc_rev;			/* MAC revision */
375 	wm_phy_type sc_phytype;		/* PHY type */
376 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
377 #define	WM_MEDIATYPE_UNKNOWN		0x00
378 #define	WM_MEDIATYPE_FIBER		0x01
379 #define	WM_MEDIATYPE_COPPER		0x02
380 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
381 	int sc_funcid;			/* unit number of the chip (0 to 3) */
382 	int sc_flags;			/* flags; see below */
383 	int sc_if_flags;		/* last if_flags */
384 	int sc_flowflags;		/* 802.3x flow control flags */
385 	int sc_align_tweak;
386 
387 	void *sc_ihs[WM_MAX_NINTR];	/*
388 					 * interrupt cookie.
389 					 * legacy and msi use sc_ihs[0].
390 					 */
391 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
392 	int sc_nintrs;			/* number of interrupts */
393 
394 	int sc_link_intr_idx;		/* index of MSI-X tables */
395 
396 	callout_t sc_tick_ch;		/* tick callout */
397 	bool sc_stopping;
398 
399 	int sc_nvm_ver_major;
400 	int sc_nvm_ver_minor;
401 	int sc_nvm_ver_build;
402 	int sc_nvm_addrbits;		/* NVM address bits */
403 	unsigned int sc_nvm_wordsize;	/* NVM word size */
404 	int sc_ich8_flash_base;
405 	int sc_ich8_flash_bank_size;
406 	int sc_nvm_k1_enabled;
407 
408 	int sc_nqueues;
409 	struct wm_queue *sc_queue;
410 
411 	int sc_affinity_offset;
412 
413 #ifdef WM_EVENT_COUNTERS
414 	/* Event counters. */
415 	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
416 	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
417 	struct evcnt sc_ev_txfifo_stall;/* Tx FIFO stalls (82547) */
418 	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
419 	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
420 	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
421 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
422 
423 	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
424 	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
425 	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
426 	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */
427 	struct evcnt sc_ev_txtusum6;	/* TCP/UDP v6 cksums comp. out-bound */
428 	struct evcnt sc_ev_txtso;	/* TCP seg offload out-bound (IPv4) */
429 	struct evcnt sc_ev_txtso6;	/* TCP seg offload out-bound (IPv6) */
430 	struct evcnt sc_ev_txtsopain;	/* painful header manip. for TSO */
431 
432 	struct evcnt sc_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
433 	struct evcnt sc_ev_txdrop;	/* Tx packets dropped(too many segs) */
434 
435 	struct evcnt sc_ev_tu;		/* Tx underrun */
436 
437 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
438 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
439 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
440 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
441 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
442 #endif /* WM_EVENT_COUNTERS */
443 
444 	/* This variable are used only on the 82547. */
445 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
446 
447 	uint32_t sc_ctrl;		/* prototype CTRL register */
448 #if 0
449 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
450 #endif
451 	uint32_t sc_icr;		/* prototype interrupt bits */
452 	uint32_t sc_itr;		/* prototype intr throttling reg */
453 	uint32_t sc_tctl;		/* prototype TCTL register */
454 	uint32_t sc_rctl;		/* prototype RCTL register */
455 	uint32_t sc_txcw;		/* prototype TXCW register */
456 	uint32_t sc_tipg;		/* prototype TIPG register */
457 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
458 	uint32_t sc_pba;		/* prototype PBA register */
459 
460 	int sc_tbi_linkup;		/* TBI link status */
461 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
462 	int sc_tbi_serdes_ticks;	/* tbi ticks */
463 
464 	int sc_mchash_type;		/* multicast filter offset */
465 
466 	krndsource_t rnd_source;	/* random source */
467 
468 	kmutex_t *sc_core_lock;		/* lock for softc operations */
469 
470 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
471 };
472 
473 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
474 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
475 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
476 
477 #ifdef WM_MPSAFE
478 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
479 #else
480 #define CALLOUT_FLAGS	0
481 #endif
482 
483 #define	WM_RXCHAIN_RESET(rxq)						\
484 do {									\
485 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
486 	*(rxq)->rxq_tailp = NULL;					\
487 	(rxq)->rxq_len = 0;						\
488 } while (/*CONSTCOND*/0)
489 
490 #define	WM_RXCHAIN_LINK(rxq, m)						\
491 do {									\
492 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
493 	(rxq)->rxq_tailp = &(m)->m_next;				\
494 } while (/*CONSTCOND*/0)
495 
496 #ifdef WM_EVENT_COUNTERS
497 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
498 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
499 #else
500 #define	WM_EVCNT_INCR(ev)	/* nothing */
501 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
502 #endif
503 
504 #define	CSR_READ(sc, reg)						\
505 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
506 #define	CSR_WRITE(sc, reg, val)						\
507 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
508 #define	CSR_WRITE_FLUSH(sc)						\
509 	(void) CSR_READ((sc), WMREG_STATUS)
510 
511 #define ICH8_FLASH_READ32(sc, reg)					\
512 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
513 	    (reg) + sc->sc_flashreg_offset)
514 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
515 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
516 	    (reg) + sc->sc_flashreg_offset, (data))
517 
518 #define ICH8_FLASH_READ16(sc, reg)					\
519 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
520 	    (reg) + sc->sc_flashreg_offset)
521 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
522 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
523 	    (reg) + sc->sc_flashreg_offset, (data))
524 
525 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
526 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
527 
528 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
529 #define	WM_CDTXADDR_HI(txq, x)						\
530 	(sizeof(bus_addr_t) == 8 ?					\
531 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
532 
533 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
534 #define	WM_CDRXADDR_HI(rxq, x)						\
535 	(sizeof(bus_addr_t) == 8 ?					\
536 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
537 
538 /*
539  * Register read/write functions.
540  * Other than CSR_{READ|WRITE}().
541  */
542 #if 0
543 static inline uint32_t wm_io_read(struct wm_softc *, int);
544 #endif
545 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
546 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
547 	uint32_t, uint32_t);
548 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
549 
550 /*
551  * Descriptor sync/init functions.
552  */
553 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
554 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
555 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
556 
557 /*
558  * Device driver interface functions and commonly used functions.
559  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
560  */
561 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
562 static int	wm_match(device_t, cfdata_t, void *);
563 static void	wm_attach(device_t, device_t, void *);
564 static int	wm_detach(device_t, int);
565 static bool	wm_suspend(device_t, const pmf_qual_t *);
566 static bool	wm_resume(device_t, const pmf_qual_t *);
567 static void	wm_watchdog(struct ifnet *);
568 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
569 static void	wm_tick(void *);
570 static int	wm_ifflags_cb(struct ethercom *);
571 static int	wm_ioctl(struct ifnet *, u_long, void *);
572 /* MAC address related */
573 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
574 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
575 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
576 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
577 static void	wm_set_filter(struct wm_softc *);
578 /* Reset and init related */
579 static void	wm_set_vlan(struct wm_softc *);
580 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
581 static void	wm_get_auto_rd_done(struct wm_softc *);
582 static void	wm_lan_init_done(struct wm_softc *);
583 static void	wm_get_cfg_done(struct wm_softc *);
584 static void	wm_initialize_hardware_bits(struct wm_softc *);
585 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
586 static void	wm_reset(struct wm_softc *);
587 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
588 static void	wm_rxdrain(struct wm_rxqueue *);
589 static void	wm_rss_getkey(uint8_t *);
590 static void	wm_init_rss(struct wm_softc *);
591 static void	wm_adjust_qnum(struct wm_softc *, int);
592 static int	wm_setup_legacy(struct wm_softc *);
593 static int	wm_setup_msix(struct wm_softc *);
594 static int	wm_init(struct ifnet *);
595 static int	wm_init_locked(struct ifnet *);
596 static void	wm_stop(struct ifnet *, int);
597 static void	wm_stop_locked(struct ifnet *, int);
598 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
599 static void	wm_82547_txfifo_stall(void *);
600 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
601 /* DMA related */
602 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
603 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
604 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
605 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
606     struct wm_txqueue *);
607 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
608 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
609 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
610     struct wm_rxqueue *);
611 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
612 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
613 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
614 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
615 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
616 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
617 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
618     struct wm_txqueue *);
619 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
620     struct wm_rxqueue *);
621 static int	wm_alloc_txrx_queues(struct wm_softc *);
622 static void	wm_free_txrx_queues(struct wm_softc *);
623 static int	wm_init_txrx_queues(struct wm_softc *);
624 /* Start */
625 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
626     uint32_t *, uint8_t *);
627 static void	wm_start(struct ifnet *);
628 static void	wm_start_locked(struct ifnet *);
629 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
630     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
631 static void	wm_nq_start(struct ifnet *);
632 static void	wm_nq_start_locked(struct ifnet *);
633 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
634 static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
635 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
636 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
637 /* Interrupt */
638 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
639 static void	wm_rxeof(struct wm_rxqueue *);
640 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
641 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
642 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
643 static void	wm_linkintr(struct wm_softc *, uint32_t);
644 static int	wm_intr_legacy(void *);
645 static int	wm_txrxintr_msix(void *);
646 static int	wm_linkintr_msix(void *);
647 
648 /*
649  * Media related.
650  * GMII, SGMII, TBI, SERDES and SFP.
651  */
652 /* Common */
653 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
654 /* GMII related */
655 static void	wm_gmii_reset(struct wm_softc *);
656 static int	wm_get_phy_id_82575(struct wm_softc *);
657 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
658 static int	wm_gmii_mediachange(struct ifnet *);
659 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
660 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
661 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
662 static int	wm_gmii_i82543_readreg(device_t, int, int);
663 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
664 static int	wm_gmii_i82544_readreg(device_t, int, int);
665 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
666 static int	wm_gmii_i80003_readreg(device_t, int, int);
667 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
668 static int	wm_gmii_bm_readreg(device_t, int, int);
669 static void	wm_gmii_bm_writereg(device_t, int, int, int);
670 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
671 static int	wm_gmii_hv_readreg(device_t, int, int);
672 static void	wm_gmii_hv_writereg(device_t, int, int, int);
673 static int	wm_gmii_82580_readreg(device_t, int, int);
674 static void	wm_gmii_82580_writereg(device_t, int, int, int);
675 static int	wm_gmii_gs40g_readreg(device_t, int, int);
676 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
677 static void	wm_gmii_statchg(struct ifnet *);
678 static int	wm_kmrn_readreg(struct wm_softc *, int);
679 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
680 /* SGMII */
681 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
682 static int	wm_sgmii_readreg(device_t, int, int);
683 static void	wm_sgmii_writereg(device_t, int, int, int);
684 /* TBI related */
685 static void	wm_tbi_mediainit(struct wm_softc *);
686 static int	wm_tbi_mediachange(struct ifnet *);
687 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
688 static int	wm_check_for_link(struct wm_softc *);
689 static void	wm_tbi_tick(struct wm_softc *);
690 /* SERDES related */
691 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
692 static int	wm_serdes_mediachange(struct ifnet *);
693 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
694 static void	wm_serdes_tick(struct wm_softc *);
695 /* SFP related */
696 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
697 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
698 
699 /*
700  * NVM related.
701  * Microwire, SPI (w/wo EERD) and Flash.
702  */
703 /* Misc functions */
704 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
705 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
706 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
707 /* Microwire */
708 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
709 /* SPI */
710 static int	wm_nvm_ready_spi(struct wm_softc *);
711 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
712 /* Using with EERD */
713 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
714 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
715 /* Flash */
716 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
717     unsigned int *);
718 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
719 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
720 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
721 	uint32_t *);
722 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
723 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
724 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
725 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
726 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
727 /* iNVM */
728 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
729 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
730 /* Lock, detecting NVM type, validate checksum and read */
731 static int	wm_nvm_acquire(struct wm_softc *);
732 static void	wm_nvm_release(struct wm_softc *);
733 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
734 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
735 static int	wm_nvm_validate_checksum(struct wm_softc *);
736 static void	wm_nvm_version_invm(struct wm_softc *);
737 static void	wm_nvm_version(struct wm_softc *);
738 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
739 
740 /*
741  * Hardware semaphores.
742  * Very complexed...
743  */
744 static int	wm_get_swsm_semaphore(struct wm_softc *);
745 static void	wm_put_swsm_semaphore(struct wm_softc *);
746 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
747 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
748 static int	wm_get_swfwhw_semaphore(struct wm_softc *);
749 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
750 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
751 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
752 
753 /*
754  * Management mode and power management related subroutines.
755  * BMC, AMT, suspend/resume and EEE.
756  */
757 #ifdef WM_WOL
758 static int	wm_check_mng_mode(struct wm_softc *);
759 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
760 static int	wm_check_mng_mode_82574(struct wm_softc *);
761 static int	wm_check_mng_mode_generic(struct wm_softc *);
762 #endif
763 static int	wm_enable_mng_pass_thru(struct wm_softc *);
764 static bool	wm_phy_resetisblocked(struct wm_softc *);
765 static void	wm_get_hw_control(struct wm_softc *);
766 static void	wm_release_hw_control(struct wm_softc *);
767 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
768 static void	wm_smbustopci(struct wm_softc *);
769 static void	wm_init_manageability(struct wm_softc *);
770 static void	wm_release_manageability(struct wm_softc *);
771 static void	wm_get_wakeup(struct wm_softc *);
772 #ifdef WM_WOL
773 static void	wm_enable_phy_wakeup(struct wm_softc *);
774 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
775 static void	wm_enable_wakeup(struct wm_softc *);
776 #endif
777 /* LPLU (Low Power Link Up) */
778 static void	wm_lplu_d0_disable(struct wm_softc *);
779 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
780 /* EEE */
781 static void	wm_set_eee_i350(struct wm_softc *);
782 
783 /*
784  * Workarounds (mainly PHY related).
785  * Basically, PHY's workarounds are in the PHY drivers.
786  */
787 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
788 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
789 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
790 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
791 static void	wm_k1_gig_workaround_hv(struct wm_softc *, int);
792 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
793 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
794 static void	wm_reset_init_script_82575(struct wm_softc *);
795 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
796 static void	wm_pll_workaround_i210(struct wm_softc *);
797 
798 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
799     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
800 
801 /*
802  * Devices supported by this driver.
803  */
804 static const struct wm_product {
805 	pci_vendor_id_t		wmp_vendor;
806 	pci_product_id_t	wmp_product;
807 	const char		*wmp_name;
808 	wm_chip_type		wmp_type;
809 	uint32_t		wmp_flags;
810 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
811 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
812 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
813 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
814 #define WMP_MEDIATYPE(x)	((x) & 0x03)
815 } wm_products[] = {
816 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
817 	  "Intel i82542 1000BASE-X Ethernet",
818 	  WM_T_82542_2_1,	WMP_F_FIBER },
819 
820 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
821 	  "Intel i82543GC 1000BASE-X Ethernet",
822 	  WM_T_82543,		WMP_F_FIBER },
823 
824 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
825 	  "Intel i82543GC 1000BASE-T Ethernet",
826 	  WM_T_82543,		WMP_F_COPPER },
827 
828 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
829 	  "Intel i82544EI 1000BASE-T Ethernet",
830 	  WM_T_82544,		WMP_F_COPPER },
831 
832 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
833 	  "Intel i82544EI 1000BASE-X Ethernet",
834 	  WM_T_82544,		WMP_F_FIBER },
835 
836 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
837 	  "Intel i82544GC 1000BASE-T Ethernet",
838 	  WM_T_82544,		WMP_F_COPPER },
839 
840 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
841 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
842 	  WM_T_82544,		WMP_F_COPPER },
843 
844 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
845 	  "Intel i82540EM 1000BASE-T Ethernet",
846 	  WM_T_82540,		WMP_F_COPPER },
847 
848 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
849 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
850 	  WM_T_82540,		WMP_F_COPPER },
851 
852 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
853 	  "Intel i82540EP 1000BASE-T Ethernet",
854 	  WM_T_82540,		WMP_F_COPPER },
855 
856 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
857 	  "Intel i82540EP 1000BASE-T Ethernet",
858 	  WM_T_82540,		WMP_F_COPPER },
859 
860 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
861 	  "Intel i82540EP 1000BASE-T Ethernet",
862 	  WM_T_82540,		WMP_F_COPPER },
863 
864 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
865 	  "Intel i82545EM 1000BASE-T Ethernet",
866 	  WM_T_82545,		WMP_F_COPPER },
867 
868 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
869 	  "Intel i82545GM 1000BASE-T Ethernet",
870 	  WM_T_82545_3,		WMP_F_COPPER },
871 
872 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
873 	  "Intel i82545GM 1000BASE-X Ethernet",
874 	  WM_T_82545_3,		WMP_F_FIBER },
875 
876 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
877 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
878 	  WM_T_82545_3,		WMP_F_SERDES },
879 
880 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
881 	  "Intel i82546EB 1000BASE-T Ethernet",
882 	  WM_T_82546,		WMP_F_COPPER },
883 
884 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
885 	  "Intel i82546EB 1000BASE-T Ethernet",
886 	  WM_T_82546,		WMP_F_COPPER },
887 
888 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
889 	  "Intel i82545EM 1000BASE-X Ethernet",
890 	  WM_T_82545,		WMP_F_FIBER },
891 
892 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
893 	  "Intel i82546EB 1000BASE-X Ethernet",
894 	  WM_T_82546,		WMP_F_FIBER },
895 
896 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
897 	  "Intel i82546GB 1000BASE-T Ethernet",
898 	  WM_T_82546_3,		WMP_F_COPPER },
899 
900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
901 	  "Intel i82546GB 1000BASE-X Ethernet",
902 	  WM_T_82546_3,		WMP_F_FIBER },
903 
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
905 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
906 	  WM_T_82546_3,		WMP_F_SERDES },
907 
908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
909 	  "i82546GB quad-port Gigabit Ethernet",
910 	  WM_T_82546_3,		WMP_F_COPPER },
911 
912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
913 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
914 	  WM_T_82546_3,		WMP_F_COPPER },
915 
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
917 	  "Intel PRO/1000MT (82546GB)",
918 	  WM_T_82546_3,		WMP_F_COPPER },
919 
920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
921 	  "Intel i82541EI 1000BASE-T Ethernet",
922 	  WM_T_82541,		WMP_F_COPPER },
923 
924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
925 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
926 	  WM_T_82541,		WMP_F_COPPER },
927 
928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
929 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
930 	  WM_T_82541,		WMP_F_COPPER },
931 
932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
933 	  "Intel i82541ER 1000BASE-T Ethernet",
934 	  WM_T_82541_2,		WMP_F_COPPER },
935 
936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
937 	  "Intel i82541GI 1000BASE-T Ethernet",
938 	  WM_T_82541_2,		WMP_F_COPPER },
939 
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
941 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
942 	  WM_T_82541_2,		WMP_F_COPPER },
943 
944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
945 	  "Intel i82541PI 1000BASE-T Ethernet",
946 	  WM_T_82541_2,		WMP_F_COPPER },
947 
948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
949 	  "Intel i82547EI 1000BASE-T Ethernet",
950 	  WM_T_82547,		WMP_F_COPPER },
951 
952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
953 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
954 	  WM_T_82547,		WMP_F_COPPER },
955 
956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
957 	  "Intel i82547GI 1000BASE-T Ethernet",
958 	  WM_T_82547_2,		WMP_F_COPPER },
959 
960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
961 	  "Intel PRO/1000 PT (82571EB)",
962 	  WM_T_82571,		WMP_F_COPPER },
963 
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
965 	  "Intel PRO/1000 PF (82571EB)",
966 	  WM_T_82571,		WMP_F_FIBER },
967 
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
969 	  "Intel PRO/1000 PB (82571EB)",
970 	  WM_T_82571,		WMP_F_SERDES },
971 
972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
973 	  "Intel PRO/1000 QT (82571EB)",
974 	  WM_T_82571,		WMP_F_COPPER },
975 
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
977 	  "Intel PRO/1000 PT Quad Port Server Adapter",
978 	  WM_T_82571,		WMP_F_COPPER, },
979 
980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
981 	  "Intel Gigabit PT Quad Port Server ExpressModule",
982 	  WM_T_82571,		WMP_F_COPPER, },
983 
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
985 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
986 	  WM_T_82571,		WMP_F_SERDES, },
987 
988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
989 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
990 	  WM_T_82571,		WMP_F_SERDES, },
991 
992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
993 	  "Intel 82571EB Quad 1000baseX Ethernet",
994 	  WM_T_82571,		WMP_F_FIBER, },
995 
996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
997 	  "Intel i82572EI 1000baseT Ethernet",
998 	  WM_T_82572,		WMP_F_COPPER },
999 
1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1001 	  "Intel i82572EI 1000baseX Ethernet",
1002 	  WM_T_82572,		WMP_F_FIBER },
1003 
1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1005 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1006 	  WM_T_82572,		WMP_F_SERDES },
1007 
1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1009 	  "Intel i82572EI 1000baseT Ethernet",
1010 	  WM_T_82572,		WMP_F_COPPER },
1011 
1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1013 	  "Intel i82573E",
1014 	  WM_T_82573,		WMP_F_COPPER },
1015 
1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1017 	  "Intel i82573E IAMT",
1018 	  WM_T_82573,		WMP_F_COPPER },
1019 
1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1021 	  "Intel i82573L Gigabit Ethernet",
1022 	  WM_T_82573,		WMP_F_COPPER },
1023 
1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1025 	  "Intel i82574L",
1026 	  WM_T_82574,		WMP_F_COPPER },
1027 
1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1029 	  "Intel i82574L",
1030 	  WM_T_82574,		WMP_F_COPPER },
1031 
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1033 	  "Intel i82583V",
1034 	  WM_T_82583,		WMP_F_COPPER },
1035 
1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1037 	  "i80003 dual 1000baseT Ethernet",
1038 	  WM_T_80003,		WMP_F_COPPER },
1039 
1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1041 	  "i80003 dual 1000baseX Ethernet",
1042 	  WM_T_80003,		WMP_F_COPPER },
1043 
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1045 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1046 	  WM_T_80003,		WMP_F_SERDES },
1047 
1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1049 	  "Intel i80003 1000baseT Ethernet",
1050 	  WM_T_80003,		WMP_F_COPPER },
1051 
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1053 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1054 	  WM_T_80003,		WMP_F_SERDES },
1055 
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1057 	  "Intel i82801H (M_AMT) LAN Controller",
1058 	  WM_T_ICH8,		WMP_F_COPPER },
1059 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1060 	  "Intel i82801H (AMT) LAN Controller",
1061 	  WM_T_ICH8,		WMP_F_COPPER },
1062 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1063 	  "Intel i82801H LAN Controller",
1064 	  WM_T_ICH8,		WMP_F_COPPER },
1065 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1066 	  "Intel i82801H (IFE) LAN Controller",
1067 	  WM_T_ICH8,		WMP_F_COPPER },
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1069 	  "Intel i82801H (M) LAN Controller",
1070 	  WM_T_ICH8,		WMP_F_COPPER },
1071 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1072 	  "Intel i82801H IFE (GT) LAN Controller",
1073 	  WM_T_ICH8,		WMP_F_COPPER },
1074 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1075 	  "Intel i82801H IFE (G) LAN Controller",
1076 	  WM_T_ICH8,		WMP_F_COPPER },
1077 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1078 	  "82801I (AMT) LAN Controller",
1079 	  WM_T_ICH9,		WMP_F_COPPER },
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1081 	  "82801I LAN Controller",
1082 	  WM_T_ICH9,		WMP_F_COPPER },
1083 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1084 	  "82801I (G) LAN Controller",
1085 	  WM_T_ICH9,		WMP_F_COPPER },
1086 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1087 	  "82801I (GT) LAN Controller",
1088 	  WM_T_ICH9,		WMP_F_COPPER },
1089 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1090 	  "82801I (C) LAN Controller",
1091 	  WM_T_ICH9,		WMP_F_COPPER },
1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1093 	  "82801I mobile LAN Controller",
1094 	  WM_T_ICH9,		WMP_F_COPPER },
1095 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
1096 	  "82801I mobile (V) LAN Controller",
1097 	  WM_T_ICH9,		WMP_F_COPPER },
1098 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1099 	  "82801I mobile (AMT) LAN Controller",
1100 	  WM_T_ICH9,		WMP_F_COPPER },
1101 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1102 	  "82567LM-4 LAN Controller",
1103 	  WM_T_ICH9,		WMP_F_COPPER },
1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_82567V_3,
1105 	  "82567V-3 LAN Controller",
1106 	  WM_T_ICH9,		WMP_F_COPPER },
1107 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1108 	  "82567LM-2 LAN Controller",
1109 	  WM_T_ICH10,		WMP_F_COPPER },
1110 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1111 	  "82567LF-2 LAN Controller",
1112 	  WM_T_ICH10,		WMP_F_COPPER },
1113 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1114 	  "82567LM-3 LAN Controller",
1115 	  WM_T_ICH10,		WMP_F_COPPER },
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1117 	  "82567LF-3 LAN Controller",
1118 	  WM_T_ICH10,		WMP_F_COPPER },
1119 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1120 	  "82567V-2 LAN Controller",
1121 	  WM_T_ICH10,		WMP_F_COPPER },
1122 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1123 	  "82567V-3? LAN Controller",
1124 	  WM_T_ICH10,		WMP_F_COPPER },
1125 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1126 	  "HANKSVILLE LAN Controller",
1127 	  WM_T_ICH10,		WMP_F_COPPER },
1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1129 	  "PCH LAN (82577LM) Controller",
1130 	  WM_T_PCH,		WMP_F_COPPER },
1131 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1132 	  "PCH LAN (82577LC) Controller",
1133 	  WM_T_PCH,		WMP_F_COPPER },
1134 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1135 	  "PCH LAN (82578DM) Controller",
1136 	  WM_T_PCH,		WMP_F_COPPER },
1137 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1138 	  "PCH LAN (82578DC) Controller",
1139 	  WM_T_PCH,		WMP_F_COPPER },
1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1141 	  "PCH2 LAN (82579LM) Controller",
1142 	  WM_T_PCH2,		WMP_F_COPPER },
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1144 	  "PCH2 LAN (82579V) Controller",
1145 	  WM_T_PCH2,		WMP_F_COPPER },
1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1147 	  "82575EB dual-1000baseT Ethernet",
1148 	  WM_T_82575,		WMP_F_COPPER },
1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1150 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1151 	  WM_T_82575,		WMP_F_SERDES },
1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1153 	  "82575GB quad-1000baseT Ethernet",
1154 	  WM_T_82575,		WMP_F_COPPER },
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1156 	  "82575GB quad-1000baseT Ethernet (PM)",
1157 	  WM_T_82575,		WMP_F_COPPER },
1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1159 	  "82576 1000BaseT Ethernet",
1160 	  WM_T_82576,		WMP_F_COPPER },
1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1162 	  "82576 1000BaseX Ethernet",
1163 	  WM_T_82576,		WMP_F_FIBER },
1164 
1165 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1166 	  "82576 gigabit Ethernet (SERDES)",
1167 	  WM_T_82576,		WMP_F_SERDES },
1168 
1169 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1170 	  "82576 quad-1000BaseT Ethernet",
1171 	  WM_T_82576,		WMP_F_COPPER },
1172 
1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1174 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1175 	  WM_T_82576,		WMP_F_COPPER },
1176 
1177 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1178 	  "82576 gigabit Ethernet",
1179 	  WM_T_82576,		WMP_F_COPPER },
1180 
1181 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1182 	  "82576 gigabit Ethernet (SERDES)",
1183 	  WM_T_82576,		WMP_F_SERDES },
1184 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1185 	  "82576 quad-gigabit Ethernet (SERDES)",
1186 	  WM_T_82576,		WMP_F_SERDES },
1187 
1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1189 	  "82580 1000BaseT Ethernet",
1190 	  WM_T_82580,		WMP_F_COPPER },
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1192 	  "82580 1000BaseX Ethernet",
1193 	  WM_T_82580,		WMP_F_FIBER },
1194 
1195 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1196 	  "82580 1000BaseT Ethernet (SERDES)",
1197 	  WM_T_82580,		WMP_F_SERDES },
1198 
1199 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1200 	  "82580 gigabit Ethernet (SGMII)",
1201 	  WM_T_82580,		WMP_F_COPPER },
1202 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1203 	  "82580 dual-1000BaseT Ethernet",
1204 	  WM_T_82580,		WMP_F_COPPER },
1205 
1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1207 	  "82580 quad-1000BaseX Ethernet",
1208 	  WM_T_82580,		WMP_F_FIBER },
1209 
1210 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1211 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1212 	  WM_T_82580,		WMP_F_COPPER },
1213 
1214 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1215 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1216 	  WM_T_82580,		WMP_F_SERDES },
1217 
1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1219 	  "DH89XXCC 1000BASE-KX Ethernet",
1220 	  WM_T_82580,		WMP_F_SERDES },
1221 
1222 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1223 	  "DH89XXCC Gigabit Ethernet (SFP)",
1224 	  WM_T_82580,		WMP_F_SERDES },
1225 
1226 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1227 	  "I350 Gigabit Network Connection",
1228 	  WM_T_I350,		WMP_F_COPPER },
1229 
1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1231 	  "I350 Gigabit Fiber Network Connection",
1232 	  WM_T_I350,		WMP_F_FIBER },
1233 
1234 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1235 	  "I350 Gigabit Backplane Connection",
1236 	  WM_T_I350,		WMP_F_SERDES },
1237 
1238 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1239 	  "I350 Quad Port Gigabit Ethernet",
1240 	  WM_T_I350,		WMP_F_SERDES },
1241 
1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1243 	  "I350 Gigabit Connection",
1244 	  WM_T_I350,		WMP_F_COPPER },
1245 
1246 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1247 	  "I354 Gigabit Ethernet (KX)",
1248 	  WM_T_I354,		WMP_F_SERDES },
1249 
1250 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1251 	  "I354 Gigabit Ethernet (SGMII)",
1252 	  WM_T_I354,		WMP_F_COPPER },
1253 
1254 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1255 	  "I354 Gigabit Ethernet (2.5G)",
1256 	  WM_T_I354,		WMP_F_COPPER },
1257 
1258 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1259 	  "I210-T1 Ethernet Server Adapter",
1260 	  WM_T_I210,		WMP_F_COPPER },
1261 
1262 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1263 	  "I210 Ethernet (Copper OEM)",
1264 	  WM_T_I210,		WMP_F_COPPER },
1265 
1266 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1267 	  "I210 Ethernet (Copper IT)",
1268 	  WM_T_I210,		WMP_F_COPPER },
1269 
1270 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1271 	  "I210 Ethernet (FLASH less)",
1272 	  WM_T_I210,		WMP_F_COPPER },
1273 
1274 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1275 	  "I210 Gigabit Ethernet (Fiber)",
1276 	  WM_T_I210,		WMP_F_FIBER },
1277 
1278 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1279 	  "I210 Gigabit Ethernet (SERDES)",
1280 	  WM_T_I210,		WMP_F_SERDES },
1281 
1282 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1283 	  "I210 Gigabit Ethernet (FLASH less)",
1284 	  WM_T_I210,		WMP_F_SERDES },
1285 
1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1287 	  "I210 Gigabit Ethernet (SGMII)",
1288 	  WM_T_I210,		WMP_F_COPPER },
1289 
1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1291 	  "I211 Ethernet (COPPER)",
1292 	  WM_T_I211,		WMP_F_COPPER },
1293 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1294 	  "I217 V Ethernet Connection",
1295 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1296 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1297 	  "I217 LM Ethernet Connection",
1298 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1299 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1300 	  "I218 V Ethernet Connection",
1301 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1303 	  "I218 V Ethernet Connection",
1304 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1305 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1306 	  "I218 V Ethernet Connection",
1307 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1308 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1309 	  "I218 LM Ethernet Connection",
1310 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1311 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1312 	  "I218 LM Ethernet Connection",
1313 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1315 	  "I218 LM Ethernet Connection",
1316 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1317 #if 0
1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1319 	  "I219 V Ethernet Connection",
1320 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1321 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1322 	  "I219 V Ethernet Connection",
1323 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1324 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1325 	  "I219 LM Ethernet Connection",
1326 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1327 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1328 	  "I219 LM Ethernet Connection",
1329 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1330 #endif
1331 	{ 0,			0,
1332 	  NULL,
1333 	  0,			0 },
1334 };
1335 
1336 #ifdef WM_EVENT_COUNTERS
1337 static char wm_txseg_evcnt_names[WM_NTXSEGS][sizeof("txsegXXX")];
1338 #endif /* WM_EVENT_COUNTERS */
1339 
1340 
1341 /*
1342  * Register read/write functions.
1343  * Other than CSR_{READ|WRITE}().
1344  */
1345 
1346 #if 0 /* Not currently used */
1347 static inline uint32_t
1348 wm_io_read(struct wm_softc *sc, int reg)
1349 {
1350 
1351 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1352 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1353 }
1354 #endif
1355 
1356 static inline void
1357 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1358 {
1359 
1360 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1361 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1362 }
1363 
1364 static inline void
1365 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1366     uint32_t data)
1367 {
1368 	uint32_t regval;
1369 	int i;
1370 
1371 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1372 
1373 	CSR_WRITE(sc, reg, regval);
1374 
1375 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1376 		delay(5);
1377 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1378 			break;
1379 	}
1380 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1381 		aprint_error("%s: WARNING:"
1382 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1383 		    device_xname(sc->sc_dev), reg);
1384 	}
1385 }
1386 
1387 static inline void
1388 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1389 {
1390 	wa->wa_low = htole32(v & 0xffffffffU);
1391 	if (sizeof(bus_addr_t) == 8)
1392 		wa->wa_high = htole32((uint64_t) v >> 32);
1393 	else
1394 		wa->wa_high = 0;
1395 }
1396 
1397 /*
1398  * Descriptor sync/init functions.
1399  */
1400 static inline void
1401 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1402 {
1403 	struct wm_softc *sc = txq->txq_sc;
1404 
1405 	/* If it will wrap around, sync to the end of the ring. */
1406 	if ((start + num) > WM_NTXDESC(txq)) {
1407 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1408 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1409 		    (WM_NTXDESC(txq) - start), ops);
1410 		num -= (WM_NTXDESC(txq) - start);
1411 		start = 0;
1412 	}
1413 
1414 	/* Now sync whatever is left. */
1415 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1416 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1417 }
1418 
1419 static inline void
1420 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1421 {
1422 	struct wm_softc *sc = rxq->rxq_sc;
1423 
1424 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1425 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1426 }
1427 
1428 static inline void
1429 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1430 {
1431 	struct wm_softc *sc = rxq->rxq_sc;
1432 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1433 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1434 	struct mbuf *m = rxs->rxs_mbuf;
1435 
1436 	/*
1437 	 * Note: We scoot the packet forward 2 bytes in the buffer
1438 	 * so that the payload after the Ethernet header is aligned
1439 	 * to a 4-byte boundary.
1440 
1441 	 * XXX BRAINDAMAGE ALERT!
1442 	 * The stupid chip uses the same size for every buffer, which
1443 	 * is set in the Receive Control register.  We are using the 2K
1444 	 * size option, but what we REALLY want is (2K - 2)!  For this
1445 	 * reason, we can't "scoot" packets longer than the standard
1446 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1447 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1448 	 * the upper layer copy the headers.
1449 	 */
1450 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1451 
1452 	wm_set_dma_addr(&rxd->wrx_addr,
1453 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1454 	rxd->wrx_len = 0;
1455 	rxd->wrx_cksum = 0;
1456 	rxd->wrx_status = 0;
1457 	rxd->wrx_errors = 0;
1458 	rxd->wrx_special = 0;
1459 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1460 
1461 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1462 }
1463 
1464 /*
1465  * Device driver interface functions and commonly used functions.
1466  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1467  */
1468 
1469 /* Lookup supported device table */
1470 static const struct wm_product *
1471 wm_lookup(const struct pci_attach_args *pa)
1472 {
1473 	const struct wm_product *wmp;
1474 
1475 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1476 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1477 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1478 			return wmp;
1479 	}
1480 	return NULL;
1481 }
1482 
1483 /* The match function (ca_match) */
1484 static int
1485 wm_match(device_t parent, cfdata_t cf, void *aux)
1486 {
1487 	struct pci_attach_args *pa = aux;
1488 
1489 	if (wm_lookup(pa) != NULL)
1490 		return 1;
1491 
1492 	return 0;
1493 }
1494 
1495 /* The attach function (ca_attach) */
1496 static void
1497 wm_attach(device_t parent, device_t self, void *aux)
1498 {
1499 	struct wm_softc *sc = device_private(self);
1500 	struct pci_attach_args *pa = aux;
1501 	prop_dictionary_t dict;
1502 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1503 	pci_chipset_tag_t pc = pa->pa_pc;
1504 	int counts[PCI_INTR_TYPE_SIZE];
1505 	pci_intr_type_t max_type;
1506 	const char *eetype, *xname;
1507 	bus_space_tag_t memt;
1508 	bus_space_handle_t memh;
1509 	bus_size_t memsize;
1510 	int memh_valid;
1511 	int i, error;
1512 	const struct wm_product *wmp;
1513 	prop_data_t ea;
1514 	prop_number_t pn;
1515 	uint8_t enaddr[ETHER_ADDR_LEN];
1516 	uint16_t cfg1, cfg2, swdpin, nvmword;
1517 	pcireg_t preg, memtype;
1518 	uint16_t eeprom_data, apme_mask;
1519 	bool force_clear_smbi;
1520 	uint32_t link_mode;
1521 	uint32_t reg;
1522 
1523 	sc->sc_dev = self;
1524 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1525 	sc->sc_stopping = false;
1526 
1527 	wmp = wm_lookup(pa);
1528 #ifdef DIAGNOSTIC
1529 	if (wmp == NULL) {
1530 		printf("\n");
1531 		panic("wm_attach: impossible");
1532 	}
1533 #endif
1534 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1535 
1536 	sc->sc_pc = pa->pa_pc;
1537 	sc->sc_pcitag = pa->pa_tag;
1538 
1539 	if (pci_dma64_available(pa))
1540 		sc->sc_dmat = pa->pa_dmat64;
1541 	else
1542 		sc->sc_dmat = pa->pa_dmat;
1543 
1544 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1545 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1546 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1547 
1548 	sc->sc_type = wmp->wmp_type;
1549 	if (sc->sc_type < WM_T_82543) {
1550 		if (sc->sc_rev < 2) {
1551 			aprint_error_dev(sc->sc_dev,
1552 			    "i82542 must be at least rev. 2\n");
1553 			return;
1554 		}
1555 		if (sc->sc_rev < 3)
1556 			sc->sc_type = WM_T_82542_2_0;
1557 	}
1558 
1559 	/*
1560 	 * Disable MSI for Errata:
1561 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1562 	 *
1563 	 *  82544: Errata 25
1564 	 *  82540: Errata  6 (easy to reproduce device timeout)
1565 	 *  82545: Errata  4 (easy to reproduce device timeout)
1566 	 *  82546: Errata 26 (easy to reproduce device timeout)
1567 	 *  82541: Errata  7 (easy to reproduce device timeout)
1568 	 *
1569 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1570 	 *
1571 	 *  82571 & 82572: Errata 63
1572 	 */
1573 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1574 	    || (sc->sc_type == WM_T_82572))
1575 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1576 
1577 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1578 	    || (sc->sc_type == WM_T_82580)
1579 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1580 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1581 		sc->sc_flags |= WM_F_NEWQUEUE;
1582 
1583 	/* Set device properties (mactype) */
1584 	dict = device_properties(sc->sc_dev);
1585 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1586 
1587 	/*
1588 	 * Map the device.  All devices support memory-mapped acccess,
1589 	 * and it is really required for normal operation.
1590 	 */
1591 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1592 	switch (memtype) {
1593 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1594 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1595 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1596 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1597 		break;
1598 	default:
1599 		memh_valid = 0;
1600 		break;
1601 	}
1602 
1603 	if (memh_valid) {
1604 		sc->sc_st = memt;
1605 		sc->sc_sh = memh;
1606 		sc->sc_ss = memsize;
1607 	} else {
1608 		aprint_error_dev(sc->sc_dev,
1609 		    "unable to map device registers\n");
1610 		return;
1611 	}
1612 
1613 	/*
1614 	 * In addition, i82544 and later support I/O mapped indirect
1615 	 * register access.  It is not desirable (nor supported in
1616 	 * this driver) to use it for normal operation, though it is
1617 	 * required to work around bugs in some chip versions.
1618 	 */
1619 	if (sc->sc_type >= WM_T_82544) {
1620 		/* First we have to find the I/O BAR. */
1621 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1622 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1623 			if (memtype == PCI_MAPREG_TYPE_IO)
1624 				break;
1625 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1626 			    PCI_MAPREG_MEM_TYPE_64BIT)
1627 				i += 4;	/* skip high bits, too */
1628 		}
1629 		if (i < PCI_MAPREG_END) {
1630 			/*
1631 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1632 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1633 			 * It's no problem because newer chips has no this
1634 			 * bug.
1635 			 *
1636 			 * The i8254x doesn't apparently respond when the
1637 			 * I/O BAR is 0, which looks somewhat like it's not
1638 			 * been configured.
1639 			 */
1640 			preg = pci_conf_read(pc, pa->pa_tag, i);
1641 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1642 				aprint_error_dev(sc->sc_dev,
1643 				    "WARNING: I/O BAR at zero.\n");
1644 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1645 					0, &sc->sc_iot, &sc->sc_ioh,
1646 					NULL, &sc->sc_ios) == 0) {
1647 				sc->sc_flags |= WM_F_IOH_VALID;
1648 			} else {
1649 				aprint_error_dev(sc->sc_dev,
1650 				    "WARNING: unable to map I/O space\n");
1651 			}
1652 		}
1653 
1654 	}
1655 
1656 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1657 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1658 	preg |= PCI_COMMAND_MASTER_ENABLE;
1659 	if (sc->sc_type < WM_T_82542_2_1)
1660 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1661 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1662 
1663 	/* power up chip */
1664 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1665 	    NULL)) && error != EOPNOTSUPP) {
1666 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1667 		return;
1668 	}
1669 
1670 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1671 
1672 	/* Allocation settings */
1673 	max_type = PCI_INTR_TYPE_MSIX;
1674 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1675 	counts[PCI_INTR_TYPE_MSI] = 1;
1676 	counts[PCI_INTR_TYPE_INTX] = 1;
1677 
1678 alloc_retry:
1679 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1680 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1681 		return;
1682 	}
1683 
1684 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1685 		error = wm_setup_msix(sc);
1686 		if (error) {
1687 			pci_intr_release(pc, sc->sc_intrs,
1688 			    counts[PCI_INTR_TYPE_MSIX]);
1689 
1690 			/* Setup for MSI: Disable MSI-X */
1691 			max_type = PCI_INTR_TYPE_MSI;
1692 			counts[PCI_INTR_TYPE_MSI] = 1;
1693 			counts[PCI_INTR_TYPE_INTX] = 1;
1694 			goto alloc_retry;
1695 		}
1696 	} else 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1697 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1698 		error = wm_setup_legacy(sc);
1699 		if (error) {
1700 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1701 			    counts[PCI_INTR_TYPE_MSI]);
1702 
1703 			/* The next try is for INTx: Disable MSI */
1704 			max_type = PCI_INTR_TYPE_INTX;
1705 			counts[PCI_INTR_TYPE_INTX] = 1;
1706 			goto alloc_retry;
1707 		}
1708 	} else {
1709 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1710 		error = wm_setup_legacy(sc);
1711 		if (error) {
1712 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1713 			    counts[PCI_INTR_TYPE_INTX]);
1714 			return;
1715 		}
1716 	}
1717 
1718 	/*
1719 	 * Check the function ID (unit number of the chip).
1720 	 */
1721 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1722 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1723 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1724 	    || (sc->sc_type == WM_T_82580)
1725 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1726 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1727 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1728 	else
1729 		sc->sc_funcid = 0;
1730 
1731 	/*
1732 	 * Determine a few things about the bus we're connected to.
1733 	 */
1734 	if (sc->sc_type < WM_T_82543) {
1735 		/* We don't really know the bus characteristics here. */
1736 		sc->sc_bus_speed = 33;
1737 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1738 		/*
1739 		 * CSA (Communication Streaming Architecture) is about as fast
1740 		 * a 32-bit 66MHz PCI Bus.
1741 		 */
1742 		sc->sc_flags |= WM_F_CSA;
1743 		sc->sc_bus_speed = 66;
1744 		aprint_verbose_dev(sc->sc_dev,
1745 		    "Communication Streaming Architecture\n");
1746 		if (sc->sc_type == WM_T_82547) {
1747 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1748 			callout_setfunc(&sc->sc_txfifo_ch,
1749 					wm_82547_txfifo_stall, sc);
1750 			aprint_verbose_dev(sc->sc_dev,
1751 			    "using 82547 Tx FIFO stall work-around\n");
1752 		}
1753 	} else if (sc->sc_type >= WM_T_82571) {
1754 		sc->sc_flags |= WM_F_PCIE;
1755 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1756 		    && (sc->sc_type != WM_T_ICH10)
1757 		    && (sc->sc_type != WM_T_PCH)
1758 		    && (sc->sc_type != WM_T_PCH2)
1759 		    && (sc->sc_type != WM_T_PCH_LPT)
1760 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1761 			/* ICH* and PCH* have no PCIe capability registers */
1762 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1763 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1764 				NULL) == 0)
1765 				aprint_error_dev(sc->sc_dev,
1766 				    "unable to find PCIe capability\n");
1767 		}
1768 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1769 	} else {
1770 		reg = CSR_READ(sc, WMREG_STATUS);
1771 		if (reg & STATUS_BUS64)
1772 			sc->sc_flags |= WM_F_BUS64;
1773 		if ((reg & STATUS_PCIX_MODE) != 0) {
1774 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1775 
1776 			sc->sc_flags |= WM_F_PCIX;
1777 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1778 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1779 				aprint_error_dev(sc->sc_dev,
1780 				    "unable to find PCIX capability\n");
1781 			else if (sc->sc_type != WM_T_82545_3 &&
1782 				 sc->sc_type != WM_T_82546_3) {
1783 				/*
1784 				 * Work around a problem caused by the BIOS
1785 				 * setting the max memory read byte count
1786 				 * incorrectly.
1787 				 */
1788 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1789 				    sc->sc_pcixe_capoff + PCIX_CMD);
1790 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1791 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1792 
1793 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1794 				    PCIX_CMD_BYTECNT_SHIFT;
1795 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1796 				    PCIX_STATUS_MAXB_SHIFT;
1797 				if (bytecnt > maxb) {
1798 					aprint_verbose_dev(sc->sc_dev,
1799 					    "resetting PCI-X MMRBC: %d -> %d\n",
1800 					    512 << bytecnt, 512 << maxb);
1801 					pcix_cmd = (pcix_cmd &
1802 					    ~PCIX_CMD_BYTECNT_MASK) |
1803 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1804 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1805 					    sc->sc_pcixe_capoff + PCIX_CMD,
1806 					    pcix_cmd);
1807 				}
1808 			}
1809 		}
1810 		/*
1811 		 * The quad port adapter is special; it has a PCIX-PCIX
1812 		 * bridge on the board, and can run the secondary bus at
1813 		 * a higher speed.
1814 		 */
1815 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1816 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1817 								      : 66;
1818 		} else if (sc->sc_flags & WM_F_PCIX) {
1819 			switch (reg & STATUS_PCIXSPD_MASK) {
1820 			case STATUS_PCIXSPD_50_66:
1821 				sc->sc_bus_speed = 66;
1822 				break;
1823 			case STATUS_PCIXSPD_66_100:
1824 				sc->sc_bus_speed = 100;
1825 				break;
1826 			case STATUS_PCIXSPD_100_133:
1827 				sc->sc_bus_speed = 133;
1828 				break;
1829 			default:
1830 				aprint_error_dev(sc->sc_dev,
1831 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1832 				    reg & STATUS_PCIXSPD_MASK);
1833 				sc->sc_bus_speed = 66;
1834 				break;
1835 			}
1836 		} else
1837 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1838 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1839 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1840 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1841 	}
1842 
1843 	/* clear interesting stat counters */
1844 	CSR_READ(sc, WMREG_COLC);
1845 	CSR_READ(sc, WMREG_RXERRC);
1846 
1847 	/* get PHY control from SMBus to PCIe */
1848 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1849 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1850 		wm_smbustopci(sc);
1851 
1852 	/* Reset the chip to a known state. */
1853 	wm_reset(sc);
1854 
1855 	/* Get some information about the EEPROM. */
1856 	switch (sc->sc_type) {
1857 	case WM_T_82542_2_0:
1858 	case WM_T_82542_2_1:
1859 	case WM_T_82543:
1860 	case WM_T_82544:
1861 		/* Microwire */
1862 		sc->sc_nvm_wordsize = 64;
1863 		sc->sc_nvm_addrbits = 6;
1864 		break;
1865 	case WM_T_82540:
1866 	case WM_T_82545:
1867 	case WM_T_82545_3:
1868 	case WM_T_82546:
1869 	case WM_T_82546_3:
1870 		/* Microwire */
1871 		reg = CSR_READ(sc, WMREG_EECD);
1872 		if (reg & EECD_EE_SIZE) {
1873 			sc->sc_nvm_wordsize = 256;
1874 			sc->sc_nvm_addrbits = 8;
1875 		} else {
1876 			sc->sc_nvm_wordsize = 64;
1877 			sc->sc_nvm_addrbits = 6;
1878 		}
1879 		sc->sc_flags |= WM_F_LOCK_EECD;
1880 		break;
1881 	case WM_T_82541:
1882 	case WM_T_82541_2:
1883 	case WM_T_82547:
1884 	case WM_T_82547_2:
1885 		sc->sc_flags |= WM_F_LOCK_EECD;
1886 		reg = CSR_READ(sc, WMREG_EECD);
1887 		if (reg & EECD_EE_TYPE) {
1888 			/* SPI */
1889 			sc->sc_flags |= WM_F_EEPROM_SPI;
1890 			wm_nvm_set_addrbits_size_eecd(sc);
1891 		} else {
1892 			/* Microwire */
1893 			if ((reg & EECD_EE_ABITS) != 0) {
1894 				sc->sc_nvm_wordsize = 256;
1895 				sc->sc_nvm_addrbits = 8;
1896 			} else {
1897 				sc->sc_nvm_wordsize = 64;
1898 				sc->sc_nvm_addrbits = 6;
1899 			}
1900 		}
1901 		break;
1902 	case WM_T_82571:
1903 	case WM_T_82572:
1904 		/* SPI */
1905 		sc->sc_flags |= WM_F_EEPROM_SPI;
1906 		wm_nvm_set_addrbits_size_eecd(sc);
1907 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1908 		break;
1909 	case WM_T_82573:
1910 		sc->sc_flags |= WM_F_LOCK_SWSM;
1911 		/* FALLTHROUGH */
1912 	case WM_T_82574:
1913 	case WM_T_82583:
1914 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
1915 			sc->sc_flags |= WM_F_EEPROM_FLASH;
1916 			sc->sc_nvm_wordsize = 2048;
1917 		} else {
1918 			/* SPI */
1919 			sc->sc_flags |= WM_F_EEPROM_SPI;
1920 			wm_nvm_set_addrbits_size_eecd(sc);
1921 		}
1922 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
1923 		break;
1924 	case WM_T_82575:
1925 	case WM_T_82576:
1926 	case WM_T_82580:
1927 	case WM_T_I350:
1928 	case WM_T_I354:
1929 	case WM_T_80003:
1930 		/* SPI */
1931 		sc->sc_flags |= WM_F_EEPROM_SPI;
1932 		wm_nvm_set_addrbits_size_eecd(sc);
1933 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
1934 		    | WM_F_LOCK_SWSM;
1935 		break;
1936 	case WM_T_ICH8:
1937 	case WM_T_ICH9:
1938 	case WM_T_ICH10:
1939 	case WM_T_PCH:
1940 	case WM_T_PCH2:
1941 	case WM_T_PCH_LPT:
1942 		/* FLASH */
1943 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1944 		sc->sc_nvm_wordsize = 2048;
1945 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
1946 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
1947 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
1948 			aprint_error_dev(sc->sc_dev,
1949 			    "can't map FLASH registers\n");
1950 			goto out;
1951 		}
1952 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
1953 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
1954 		    ICH_FLASH_SECTOR_SIZE;
1955 		sc->sc_ich8_flash_bank_size =
1956 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
1957 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
1958 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
1959 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
1960 		sc->sc_flashreg_offset = 0;
1961 		break;
1962 	case WM_T_PCH_SPT:
1963 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
1964 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
1965 		sc->sc_flasht = sc->sc_st;
1966 		sc->sc_flashh = sc->sc_sh;
1967 		sc->sc_ich8_flash_base = 0;
1968 		sc->sc_nvm_wordsize =
1969 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
1970 			* NVM_SIZE_MULTIPLIER;
1971 		/* It is size in bytes, we want words */
1972 		sc->sc_nvm_wordsize /= 2;
1973 		/* assume 2 banks */
1974 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
1975 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
1976 		break;
1977 	case WM_T_I210:
1978 	case WM_T_I211:
1979 		if (wm_nvm_get_flash_presence_i210(sc)) {
1980 			wm_nvm_set_addrbits_size_eecd(sc);
1981 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
1982 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW;
1983 		} else {
1984 			sc->sc_nvm_wordsize = INVM_SIZE;
1985 			sc->sc_flags |= WM_F_EEPROM_INVM;
1986 			sc->sc_flags |= WM_F_LOCK_SWFW;
1987 		}
1988 		break;
1989 	default:
1990 		break;
1991 	}
1992 
1993 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
1994 	switch (sc->sc_type) {
1995 	case WM_T_82571:
1996 	case WM_T_82572:
1997 		reg = CSR_READ(sc, WMREG_SWSM2);
1998 		if ((reg & SWSM2_LOCK) == 0) {
1999 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2000 			force_clear_smbi = true;
2001 		} else
2002 			force_clear_smbi = false;
2003 		break;
2004 	case WM_T_82573:
2005 	case WM_T_82574:
2006 	case WM_T_82583:
2007 		force_clear_smbi = true;
2008 		break;
2009 	default:
2010 		force_clear_smbi = false;
2011 		break;
2012 	}
2013 	if (force_clear_smbi) {
2014 		reg = CSR_READ(sc, WMREG_SWSM);
2015 		if ((reg & SWSM_SMBI) != 0)
2016 			aprint_error_dev(sc->sc_dev,
2017 			    "Please update the Bootagent\n");
2018 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2019 	}
2020 
2021 	/*
2022 	 * Defer printing the EEPROM type until after verifying the checksum
2023 	 * This allows the EEPROM type to be printed correctly in the case
2024 	 * that no EEPROM is attached.
2025 	 */
2026 	/*
2027 	 * Validate the EEPROM checksum. If the checksum fails, flag
2028 	 * this for later, so we can fail future reads from the EEPROM.
2029 	 */
2030 	if (wm_nvm_validate_checksum(sc)) {
2031 		/*
2032 		 * Read twice again because some PCI-e parts fail the
2033 		 * first check due to the link being in sleep state.
2034 		 */
2035 		if (wm_nvm_validate_checksum(sc))
2036 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2037 	}
2038 
2039 	/* Set device properties (macflags) */
2040 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2041 
2042 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2043 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2044 	else {
2045 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2046 		    sc->sc_nvm_wordsize);
2047 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2048 			aprint_verbose("iNVM");
2049 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2050 			aprint_verbose("FLASH(HW)");
2051 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2052 			aprint_verbose("FLASH");
2053 		else {
2054 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2055 				eetype = "SPI";
2056 			else
2057 				eetype = "MicroWire";
2058 			aprint_verbose("(%d address bits) %s EEPROM",
2059 			    sc->sc_nvm_addrbits, eetype);
2060 		}
2061 	}
2062 	wm_nvm_version(sc);
2063 	aprint_verbose("\n");
2064 
2065 	/* Check for I21[01] PLL workaround */
2066 	if (sc->sc_type == WM_T_I210)
2067 		sc->sc_flags |= WM_F_PLL_WA_I210;
2068 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2069 		/* NVM image release 3.25 has a workaround */
2070 		if ((sc->sc_nvm_ver_major < 3)
2071 		    || ((sc->sc_nvm_ver_major == 3)
2072 			&& (sc->sc_nvm_ver_minor < 25))) {
2073 			aprint_verbose_dev(sc->sc_dev,
2074 			    "ROM image version %d.%d is older than 3.25\n",
2075 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2076 			sc->sc_flags |= WM_F_PLL_WA_I210;
2077 		}
2078 	}
2079 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2080 		wm_pll_workaround_i210(sc);
2081 
2082 	wm_get_wakeup(sc);
2083 	switch (sc->sc_type) {
2084 	case WM_T_82571:
2085 	case WM_T_82572:
2086 	case WM_T_82573:
2087 	case WM_T_82574:
2088 	case WM_T_82583:
2089 	case WM_T_80003:
2090 	case WM_T_ICH8:
2091 	case WM_T_ICH9:
2092 	case WM_T_ICH10:
2093 	case WM_T_PCH:
2094 	case WM_T_PCH2:
2095 	case WM_T_PCH_LPT:
2096 	case WM_T_PCH_SPT:
2097 		/* Non-AMT based hardware can now take control from firmware */
2098 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2099 			wm_get_hw_control(sc);
2100 		break;
2101 	default:
2102 		break;
2103 	}
2104 
2105 	/*
2106 	 * Read the Ethernet address from the EEPROM, if not first found
2107 	 * in device properties.
2108 	 */
2109 	ea = prop_dictionary_get(dict, "mac-address");
2110 	if (ea != NULL) {
2111 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2112 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2113 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2114 	} else {
2115 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2116 			aprint_error_dev(sc->sc_dev,
2117 			    "unable to read Ethernet address\n");
2118 			goto out;
2119 		}
2120 	}
2121 
2122 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2123 	    ether_sprintf(enaddr));
2124 
2125 	/*
2126 	 * Read the config info from the EEPROM, and set up various
2127 	 * bits in the control registers based on their contents.
2128 	 */
2129 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2130 	if (pn != NULL) {
2131 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2132 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2133 	} else {
2134 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2135 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2136 			goto out;
2137 		}
2138 	}
2139 
2140 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2141 	if (pn != NULL) {
2142 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2143 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2144 	} else {
2145 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2146 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2147 			goto out;
2148 		}
2149 	}
2150 
2151 	/* check for WM_F_WOL */
2152 	switch (sc->sc_type) {
2153 	case WM_T_82542_2_0:
2154 	case WM_T_82542_2_1:
2155 	case WM_T_82543:
2156 		/* dummy? */
2157 		eeprom_data = 0;
2158 		apme_mask = NVM_CFG3_APME;
2159 		break;
2160 	case WM_T_82544:
2161 		apme_mask = NVM_CFG2_82544_APM_EN;
2162 		eeprom_data = cfg2;
2163 		break;
2164 	case WM_T_82546:
2165 	case WM_T_82546_3:
2166 	case WM_T_82571:
2167 	case WM_T_82572:
2168 	case WM_T_82573:
2169 	case WM_T_82574:
2170 	case WM_T_82583:
2171 	case WM_T_80003:
2172 	default:
2173 		apme_mask = NVM_CFG3_APME;
2174 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2175 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2176 		break;
2177 	case WM_T_82575:
2178 	case WM_T_82576:
2179 	case WM_T_82580:
2180 	case WM_T_I350:
2181 	case WM_T_I354: /* XXX ok? */
2182 	case WM_T_ICH8:
2183 	case WM_T_ICH9:
2184 	case WM_T_ICH10:
2185 	case WM_T_PCH:
2186 	case WM_T_PCH2:
2187 	case WM_T_PCH_LPT:
2188 	case WM_T_PCH_SPT:
2189 		/* XXX The funcid should be checked on some devices */
2190 		apme_mask = WUC_APME;
2191 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2192 		break;
2193 	}
2194 
2195 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2196 	if ((eeprom_data & apme_mask) != 0)
2197 		sc->sc_flags |= WM_F_WOL;
2198 #ifdef WM_DEBUG
2199 	if ((sc->sc_flags & WM_F_WOL) != 0)
2200 		printf("WOL\n");
2201 #endif
2202 
2203 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2204 		/* Check NVM for autonegotiation */
2205 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2206 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2207 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2208 		}
2209 	}
2210 
2211 	/*
2212 	 * XXX need special handling for some multiple port cards
2213 	 * to disable a paticular port.
2214 	 */
2215 
2216 	if (sc->sc_type >= WM_T_82544) {
2217 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2218 		if (pn != NULL) {
2219 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2220 			swdpin = (uint16_t) prop_number_integer_value(pn);
2221 		} else {
2222 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2223 				aprint_error_dev(sc->sc_dev,
2224 				    "unable to read SWDPIN\n");
2225 				goto out;
2226 			}
2227 		}
2228 	}
2229 
2230 	if (cfg1 & NVM_CFG1_ILOS)
2231 		sc->sc_ctrl |= CTRL_ILOS;
2232 
2233 	/*
2234 	 * XXX
2235 	 * This code isn't correct because pin 2 and 3 are located
2236 	 * in different position on newer chips. Check all datasheet.
2237 	 *
2238 	 * Until resolve this problem, check if a chip < 82580
2239 	 */
2240 	if (sc->sc_type <= WM_T_82580) {
2241 		if (sc->sc_type >= WM_T_82544) {
2242 			sc->sc_ctrl |=
2243 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2244 			    CTRL_SWDPIO_SHIFT;
2245 			sc->sc_ctrl |=
2246 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2247 			    CTRL_SWDPINS_SHIFT;
2248 		} else {
2249 			sc->sc_ctrl |=
2250 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2251 			    CTRL_SWDPIO_SHIFT;
2252 		}
2253 	}
2254 
2255 	/* XXX For other than 82580? */
2256 	if (sc->sc_type == WM_T_82580) {
2257 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2258 		if (nvmword & __BIT(13))
2259 			sc->sc_ctrl |= CTRL_ILOS;
2260 	}
2261 
2262 #if 0
2263 	if (sc->sc_type >= WM_T_82544) {
2264 		if (cfg1 & NVM_CFG1_IPS0)
2265 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2266 		if (cfg1 & NVM_CFG1_IPS1)
2267 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2268 		sc->sc_ctrl_ext |=
2269 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2270 		    CTRL_EXT_SWDPIO_SHIFT;
2271 		sc->sc_ctrl_ext |=
2272 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2273 		    CTRL_EXT_SWDPINS_SHIFT;
2274 	} else {
2275 		sc->sc_ctrl_ext |=
2276 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2277 		    CTRL_EXT_SWDPIO_SHIFT;
2278 	}
2279 #endif
2280 
2281 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2282 #if 0
2283 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2284 #endif
2285 
2286 	if (sc->sc_type == WM_T_PCH) {
2287 		uint16_t val;
2288 
2289 		/* Save the NVM K1 bit setting */
2290 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2291 
2292 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2293 			sc->sc_nvm_k1_enabled = 1;
2294 		else
2295 			sc->sc_nvm_k1_enabled = 0;
2296 	}
2297 
2298 	/*
2299 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2300 	 * media structures accordingly.
2301 	 */
2302 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2303 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2304 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2305 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2306 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2307 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2308 		wm_gmii_mediainit(sc, wmp->wmp_product);
2309 	} else if (sc->sc_type < WM_T_82543 ||
2310 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2311 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2312 			aprint_error_dev(sc->sc_dev,
2313 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2314 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2315 		}
2316 		wm_tbi_mediainit(sc);
2317 	} else {
2318 		switch (sc->sc_type) {
2319 		case WM_T_82575:
2320 		case WM_T_82576:
2321 		case WM_T_82580:
2322 		case WM_T_I350:
2323 		case WM_T_I354:
2324 		case WM_T_I210:
2325 		case WM_T_I211:
2326 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2327 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2328 			switch (link_mode) {
2329 			case CTRL_EXT_LINK_MODE_1000KX:
2330 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2331 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2332 				break;
2333 			case CTRL_EXT_LINK_MODE_SGMII:
2334 				if (wm_sgmii_uses_mdio(sc)) {
2335 					aprint_verbose_dev(sc->sc_dev,
2336 					    "SGMII(MDIO)\n");
2337 					sc->sc_flags |= WM_F_SGMII;
2338 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2339 					break;
2340 				}
2341 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2342 				/*FALLTHROUGH*/
2343 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2344 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2345 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2346 					if (link_mode
2347 					    == CTRL_EXT_LINK_MODE_SGMII) {
2348 						sc->sc_mediatype
2349 						    = WM_MEDIATYPE_COPPER;
2350 						sc->sc_flags |= WM_F_SGMII;
2351 					} else {
2352 						sc->sc_mediatype
2353 						    = WM_MEDIATYPE_SERDES;
2354 						aprint_verbose_dev(sc->sc_dev,
2355 						    "SERDES\n");
2356 					}
2357 					break;
2358 				}
2359 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2360 					aprint_verbose_dev(sc->sc_dev,
2361 					    "SERDES\n");
2362 
2363 				/* Change current link mode setting */
2364 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2365 				switch (sc->sc_mediatype) {
2366 				case WM_MEDIATYPE_COPPER:
2367 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2368 					break;
2369 				case WM_MEDIATYPE_SERDES:
2370 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2371 					break;
2372 				default:
2373 					break;
2374 				}
2375 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2376 				break;
2377 			case CTRL_EXT_LINK_MODE_GMII:
2378 			default:
2379 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2380 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2381 				break;
2382 			}
2383 
2384 			reg &= ~CTRL_EXT_I2C_ENA;
2385 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2386 				reg |= CTRL_EXT_I2C_ENA;
2387 			else
2388 				reg &= ~CTRL_EXT_I2C_ENA;
2389 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2390 
2391 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2392 				wm_gmii_mediainit(sc, wmp->wmp_product);
2393 			else
2394 				wm_tbi_mediainit(sc);
2395 			break;
2396 		default:
2397 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2398 				aprint_error_dev(sc->sc_dev,
2399 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2400 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2401 			wm_gmii_mediainit(sc, wmp->wmp_product);
2402 		}
2403 	}
2404 
2405 	ifp = &sc->sc_ethercom.ec_if;
2406 	xname = device_xname(sc->sc_dev);
2407 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2408 	ifp->if_softc = sc;
2409 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2410 	ifp->if_extflags = IFEF_START_MPSAFE;
2411 	ifp->if_ioctl = wm_ioctl;
2412 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2413 		ifp->if_start = wm_nq_start;
2414 		if (sc->sc_nqueues > 1)
2415 			ifp->if_transmit = wm_nq_transmit;
2416 	} else
2417 		ifp->if_start = wm_start;
2418 	ifp->if_watchdog = wm_watchdog;
2419 	ifp->if_init = wm_init;
2420 	ifp->if_stop = wm_stop;
2421 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2422 	IFQ_SET_READY(&ifp->if_snd);
2423 
2424 	/* Check for jumbo frame */
2425 	switch (sc->sc_type) {
2426 	case WM_T_82573:
2427 		/* XXX limited to 9234 if ASPM is disabled */
2428 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2429 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2430 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2431 		break;
2432 	case WM_T_82571:
2433 	case WM_T_82572:
2434 	case WM_T_82574:
2435 	case WM_T_82575:
2436 	case WM_T_82576:
2437 	case WM_T_82580:
2438 	case WM_T_I350:
2439 	case WM_T_I354: /* XXXX ok? */
2440 	case WM_T_I210:
2441 	case WM_T_I211:
2442 	case WM_T_80003:
2443 	case WM_T_ICH9:
2444 	case WM_T_ICH10:
2445 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2446 	case WM_T_PCH_LPT:
2447 	case WM_T_PCH_SPT:
2448 		/* XXX limited to 9234 */
2449 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2450 		break;
2451 	case WM_T_PCH:
2452 		/* XXX limited to 4096 */
2453 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2454 		break;
2455 	case WM_T_82542_2_0:
2456 	case WM_T_82542_2_1:
2457 	case WM_T_82583:
2458 	case WM_T_ICH8:
2459 		/* No support for jumbo frame */
2460 		break;
2461 	default:
2462 		/* ETHER_MAX_LEN_JUMBO */
2463 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2464 		break;
2465 	}
2466 
2467 	/* If we're a i82543 or greater, we can support VLANs. */
2468 	if (sc->sc_type >= WM_T_82543)
2469 		sc->sc_ethercom.ec_capabilities |=
2470 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2471 
2472 	/*
2473 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
2474 	 * on i82543 and later.
2475 	 */
2476 	if (sc->sc_type >= WM_T_82543) {
2477 		ifp->if_capabilities |=
2478 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2479 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2480 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2481 		    IFCAP_CSUM_TCPv6_Tx |
2482 		    IFCAP_CSUM_UDPv6_Tx;
2483 	}
2484 
2485 	/*
2486 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2487 	 *
2488 	 *	82541GI (8086:1076) ... no
2489 	 *	82572EI (8086:10b9) ... yes
2490 	 */
2491 	if (sc->sc_type >= WM_T_82571) {
2492 		ifp->if_capabilities |=
2493 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2494 	}
2495 
2496 	/*
2497 	 * If we're a i82544 or greater (except i82547), we can do
2498 	 * TCP segmentation offload.
2499 	 */
2500 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2501 		ifp->if_capabilities |= IFCAP_TSOv4;
2502 	}
2503 
2504 	if (sc->sc_type >= WM_T_82571) {
2505 		ifp->if_capabilities |= IFCAP_TSOv6;
2506 	}
2507 
2508 #ifdef WM_MPSAFE
2509 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2510 #else
2511 	sc->sc_core_lock = NULL;
2512 #endif
2513 
2514 	/* Attach the interface. */
2515 	if_initialize(ifp);
2516 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2517 	ether_ifattach(ifp, enaddr);
2518 	if_register(ifp);
2519 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2520 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2521 			  RND_FLAG_DEFAULT);
2522 
2523 #ifdef WM_EVENT_COUNTERS
2524 	/* Attach event counters. */
2525 	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
2526 	    NULL, xname, "txsstall");
2527 	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
2528 	    NULL, xname, "txdstall");
2529 	evcnt_attach_dynamic(&sc->sc_ev_txfifo_stall, EVCNT_TYPE_MISC,
2530 	    NULL, xname, "txfifo_stall");
2531 	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
2532 	    NULL, xname, "txdw");
2533 	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
2534 	    NULL, xname, "txqe");
2535 	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
2536 	    NULL, xname, "rxintr");
2537 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2538 	    NULL, xname, "linkintr");
2539 
2540 	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
2541 	    NULL, xname, "rxipsum");
2542 	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
2543 	    NULL, xname, "rxtusum");
2544 	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
2545 	    NULL, xname, "txipsum");
2546 	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
2547 	    NULL, xname, "txtusum");
2548 	evcnt_attach_dynamic(&sc->sc_ev_txtusum6, EVCNT_TYPE_MISC,
2549 	    NULL, xname, "txtusum6");
2550 
2551 	evcnt_attach_dynamic(&sc->sc_ev_txtso, EVCNT_TYPE_MISC,
2552 	    NULL, xname, "txtso");
2553 	evcnt_attach_dynamic(&sc->sc_ev_txtso6, EVCNT_TYPE_MISC,
2554 	    NULL, xname, "txtso6");
2555 	evcnt_attach_dynamic(&sc->sc_ev_txtsopain, EVCNT_TYPE_MISC,
2556 	    NULL, xname, "txtsopain");
2557 
2558 	for (i = 0; i < WM_NTXSEGS; i++) {
2559 		snprintf(wm_txseg_evcnt_names[i],
2560 		    sizeof(wm_txseg_evcnt_names[i]), "txseg%d", i);
2561 		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
2562 		    NULL, xname, wm_txseg_evcnt_names[i]);
2563 	}
2564 
2565 	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
2566 	    NULL, xname, "txdrop");
2567 
2568 	evcnt_attach_dynamic(&sc->sc_ev_tu, EVCNT_TYPE_MISC,
2569 	    NULL, xname, "tu");
2570 
2571 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2572 	    NULL, xname, "tx_xoff");
2573 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2574 	    NULL, xname, "tx_xon");
2575 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2576 	    NULL, xname, "rx_xoff");
2577 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2578 	    NULL, xname, "rx_xon");
2579 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2580 	    NULL, xname, "rx_macctl");
2581 #endif /* WM_EVENT_COUNTERS */
2582 
2583 	if (pmf_device_register(self, wm_suspend, wm_resume))
2584 		pmf_class_network_register(self, ifp);
2585 	else
2586 		aprint_error_dev(self, "couldn't establish power handler\n");
2587 
2588 	sc->sc_flags |= WM_F_ATTACHED;
2589  out:
2590 	return;
2591 }
2592 
2593 /* The detach function (ca_detach) */
2594 static int
2595 wm_detach(device_t self, int flags __unused)
2596 {
2597 	struct wm_softc *sc = device_private(self);
2598 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2599 	int i;
2600 
2601 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2602 		return 0;
2603 
2604 	/* Stop the interface. Callouts are stopped in it. */
2605 	wm_stop(ifp, 1);
2606 
2607 	pmf_device_deregister(self);
2608 
2609 	/* Tell the firmware about the release */
2610 	WM_CORE_LOCK(sc);
2611 	wm_release_manageability(sc);
2612 	wm_release_hw_control(sc);
2613 	WM_CORE_UNLOCK(sc);
2614 
2615 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2616 
2617 	/* Delete all remaining media. */
2618 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2619 
2620 	ether_ifdetach(ifp);
2621 	if_detach(ifp);
2622 	if_percpuq_destroy(sc->sc_ipq);
2623 
2624 	/* Unload RX dmamaps and free mbufs */
2625 	for (i = 0; i < sc->sc_nqueues; i++) {
2626 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2627 		mutex_enter(rxq->rxq_lock);
2628 		wm_rxdrain(rxq);
2629 		mutex_exit(rxq->rxq_lock);
2630 	}
2631 	/* Must unlock here */
2632 
2633 	/* Disestablish the interrupt handler */
2634 	for (i = 0; i < sc->sc_nintrs; i++) {
2635 		if (sc->sc_ihs[i] != NULL) {
2636 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2637 			sc->sc_ihs[i] = NULL;
2638 		}
2639 	}
2640 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2641 
2642 	wm_free_txrx_queues(sc);
2643 
2644 	/* Unmap the registers */
2645 	if (sc->sc_ss) {
2646 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2647 		sc->sc_ss = 0;
2648 	}
2649 	if (sc->sc_ios) {
2650 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2651 		sc->sc_ios = 0;
2652 	}
2653 	if (sc->sc_flashs) {
2654 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2655 		sc->sc_flashs = 0;
2656 	}
2657 
2658 	if (sc->sc_core_lock)
2659 		mutex_obj_free(sc->sc_core_lock);
2660 
2661 	return 0;
2662 }
2663 
2664 static bool
2665 wm_suspend(device_t self, const pmf_qual_t *qual)
2666 {
2667 	struct wm_softc *sc = device_private(self);
2668 
2669 	wm_release_manageability(sc);
2670 	wm_release_hw_control(sc);
2671 #ifdef WM_WOL
2672 	wm_enable_wakeup(sc);
2673 #endif
2674 
2675 	return true;
2676 }
2677 
2678 static bool
2679 wm_resume(device_t self, const pmf_qual_t *qual)
2680 {
2681 	struct wm_softc *sc = device_private(self);
2682 
2683 	wm_init_manageability(sc);
2684 
2685 	return true;
2686 }
2687 
2688 /*
2689  * wm_watchdog:		[ifnet interface function]
2690  *
2691  *	Watchdog timer handler.
2692  */
2693 static void
2694 wm_watchdog(struct ifnet *ifp)
2695 {
2696 	int qid;
2697 	struct wm_softc *sc = ifp->if_softc;
2698 
2699 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2700 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2701 
2702 		wm_watchdog_txq(ifp, txq);
2703 	}
2704 
2705 	/* Reset the interface. */
2706 	(void) wm_init(ifp);
2707 
2708 	/*
2709 	 * There are still some upper layer processing which call
2710 	 * ifp->if_start(). e.g. ALTQ
2711 	 */
2712 	/* Try to get more packets going. */
2713 	ifp->if_start(ifp);
2714 }
2715 
2716 static void
2717 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2718 {
2719 	struct wm_softc *sc = ifp->if_softc;
2720 
2721 	/*
2722 	 * Since we're using delayed interrupts, sweep up
2723 	 * before we report an error.
2724 	 */
2725 	mutex_enter(txq->txq_lock);
2726 	wm_txeof(sc, txq);
2727 	mutex_exit(txq->txq_lock);
2728 
2729 	if (txq->txq_free != WM_NTXDESC(txq)) {
2730 #ifdef WM_DEBUG
2731 		int i, j;
2732 		struct wm_txsoft *txs;
2733 #endif
2734 		log(LOG_ERR,
2735 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2736 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2737 		    txq->txq_next);
2738 		ifp->if_oerrors++;
2739 #ifdef WM_DEBUG
2740 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
2741 		    i = WM_NEXTTXS(txq, i)) {
2742 		    txs = &txq->txq_soft[i];
2743 		    printf("txs %d tx %d -> %d\n",
2744 			i, txs->txs_firstdesc, txs->txs_lastdesc);
2745 		    for (j = txs->txs_firstdesc; ;
2746 			j = WM_NEXTTX(txq, j)) {
2747 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2748 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2749 			printf("\t %#08x%08x\n",
2750 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2751 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2752 			if (j == txs->txs_lastdesc)
2753 				break;
2754 			}
2755 		}
2756 #endif
2757 	}
2758 }
2759 
2760 /*
2761  * wm_tick:
2762  *
2763  *	One second timer, used to check link status, sweep up
2764  *	completed transmit jobs, etc.
2765  */
2766 static void
2767 wm_tick(void *arg)
2768 {
2769 	struct wm_softc *sc = arg;
2770 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2771 #ifndef WM_MPSAFE
2772 	int s = splnet();
2773 #endif
2774 
2775 	WM_CORE_LOCK(sc);
2776 
2777 	if (sc->sc_stopping)
2778 		goto out;
2779 
2780 	if (sc->sc_type >= WM_T_82542_2_1) {
2781 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2782 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2783 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2784 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2785 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2786 	}
2787 
2788 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2789 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
2790 	    + CSR_READ(sc, WMREG_CRCERRS)
2791 	    + CSR_READ(sc, WMREG_ALGNERRC)
2792 	    + CSR_READ(sc, WMREG_SYMERRC)
2793 	    + CSR_READ(sc, WMREG_RXERRC)
2794 	    + CSR_READ(sc, WMREG_SEC)
2795 	    + CSR_READ(sc, WMREG_CEXTERR)
2796 	    + CSR_READ(sc, WMREG_RLEC);
2797 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2798 
2799 	if (sc->sc_flags & WM_F_HAS_MII)
2800 		mii_tick(&sc->sc_mii);
2801 	else if ((sc->sc_type >= WM_T_82575)
2802 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2803 		wm_serdes_tick(sc);
2804 	else
2805 		wm_tbi_tick(sc);
2806 
2807 out:
2808 	WM_CORE_UNLOCK(sc);
2809 #ifndef WM_MPSAFE
2810 	splx(s);
2811 #endif
2812 
2813 	if (!sc->sc_stopping)
2814 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2815 }
2816 
2817 static int
2818 wm_ifflags_cb(struct ethercom *ec)
2819 {
2820 	struct ifnet *ifp = &ec->ec_if;
2821 	struct wm_softc *sc = ifp->if_softc;
2822 	int change = ifp->if_flags ^ sc->sc_if_flags;
2823 	int rc = 0;
2824 
2825 	WM_CORE_LOCK(sc);
2826 
2827 	if (change != 0)
2828 		sc->sc_if_flags = ifp->if_flags;
2829 
2830 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2831 		rc = ENETRESET;
2832 		goto out;
2833 	}
2834 
2835 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2836 		wm_set_filter(sc);
2837 
2838 	wm_set_vlan(sc);
2839 
2840 out:
2841 	WM_CORE_UNLOCK(sc);
2842 
2843 	return rc;
2844 }
2845 
2846 /*
2847  * wm_ioctl:		[ifnet interface function]
2848  *
2849  *	Handle control requests from the operator.
2850  */
2851 static int
2852 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2853 {
2854 	struct wm_softc *sc = ifp->if_softc;
2855 	struct ifreq *ifr = (struct ifreq *) data;
2856 	struct ifaddr *ifa = (struct ifaddr *)data;
2857 	struct sockaddr_dl *sdl;
2858 	int s, error;
2859 
2860 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2861 		device_xname(sc->sc_dev), __func__));
2862 #ifndef WM_MPSAFE
2863 	s = splnet();
2864 #endif
2865 	switch (cmd) {
2866 	case SIOCSIFMEDIA:
2867 	case SIOCGIFMEDIA:
2868 		WM_CORE_LOCK(sc);
2869 		/* Flow control requires full-duplex mode. */
2870 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2871 		    (ifr->ifr_media & IFM_FDX) == 0)
2872 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2873 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2874 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2875 				/* We can do both TXPAUSE and RXPAUSE. */
2876 				ifr->ifr_media |=
2877 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2878 			}
2879 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2880 		}
2881 		WM_CORE_UNLOCK(sc);
2882 #ifdef WM_MPSAFE
2883 		s = splnet();
2884 #endif
2885 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2886 #ifdef WM_MPSAFE
2887 		splx(s);
2888 #endif
2889 		break;
2890 	case SIOCINITIFADDR:
2891 		WM_CORE_LOCK(sc);
2892 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2893 			sdl = satosdl(ifp->if_dl->ifa_addr);
2894 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2895 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2896 			/* unicast address is first multicast entry */
2897 			wm_set_filter(sc);
2898 			error = 0;
2899 			WM_CORE_UNLOCK(sc);
2900 			break;
2901 		}
2902 		WM_CORE_UNLOCK(sc);
2903 		/*FALLTHROUGH*/
2904 	default:
2905 #ifdef WM_MPSAFE
2906 		s = splnet();
2907 #endif
2908 		/* It may call wm_start, so unlock here */
2909 		error = ether_ioctl(ifp, cmd, data);
2910 #ifdef WM_MPSAFE
2911 		splx(s);
2912 #endif
2913 		if (error != ENETRESET)
2914 			break;
2915 
2916 		error = 0;
2917 
2918 		if (cmd == SIOCSIFCAP) {
2919 			error = (*ifp->if_init)(ifp);
2920 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2921 			;
2922 		else if (ifp->if_flags & IFF_RUNNING) {
2923 			/*
2924 			 * Multicast list has changed; set the hardware filter
2925 			 * accordingly.
2926 			 */
2927 			WM_CORE_LOCK(sc);
2928 			wm_set_filter(sc);
2929 			WM_CORE_UNLOCK(sc);
2930 		}
2931 		break;
2932 	}
2933 
2934 #ifndef WM_MPSAFE
2935 	splx(s);
2936 #endif
2937 	return error;
2938 }
2939 
2940 /* MAC address related */
2941 
2942 /*
2943  * Get the offset of MAC address and return it.
2944  * If error occured, use offset 0.
2945  */
2946 static uint16_t
2947 wm_check_alt_mac_addr(struct wm_softc *sc)
2948 {
2949 	uint16_t myea[ETHER_ADDR_LEN / 2];
2950 	uint16_t offset = NVM_OFF_MACADDR;
2951 
2952 	/* Try to read alternative MAC address pointer */
2953 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
2954 		return 0;
2955 
2956 	/* Check pointer if it's valid or not. */
2957 	if ((offset == 0x0000) || (offset == 0xffff))
2958 		return 0;
2959 
2960 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
2961 	/*
2962 	 * Check whether alternative MAC address is valid or not.
2963 	 * Some cards have non 0xffff pointer but those don't use
2964 	 * alternative MAC address in reality.
2965 	 *
2966 	 * Check whether the broadcast bit is set or not.
2967 	 */
2968 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
2969 		if (((myea[0] & 0xff) & 0x01) == 0)
2970 			return offset; /* Found */
2971 
2972 	/* Not found */
2973 	return 0;
2974 }
2975 
2976 static int
2977 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
2978 {
2979 	uint16_t myea[ETHER_ADDR_LEN / 2];
2980 	uint16_t offset = NVM_OFF_MACADDR;
2981 	int do_invert = 0;
2982 
2983 	switch (sc->sc_type) {
2984 	case WM_T_82580:
2985 	case WM_T_I350:
2986 	case WM_T_I354:
2987 		/* EEPROM Top Level Partitioning */
2988 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
2989 		break;
2990 	case WM_T_82571:
2991 	case WM_T_82575:
2992 	case WM_T_82576:
2993 	case WM_T_80003:
2994 	case WM_T_I210:
2995 	case WM_T_I211:
2996 		offset = wm_check_alt_mac_addr(sc);
2997 		if (offset == 0)
2998 			if ((sc->sc_funcid & 0x01) == 1)
2999 				do_invert = 1;
3000 		break;
3001 	default:
3002 		if ((sc->sc_funcid & 0x01) == 1)
3003 			do_invert = 1;
3004 		break;
3005 	}
3006 
3007 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]),
3008 		myea) != 0)
3009 		goto bad;
3010 
3011 	enaddr[0] = myea[0] & 0xff;
3012 	enaddr[1] = myea[0] >> 8;
3013 	enaddr[2] = myea[1] & 0xff;
3014 	enaddr[3] = myea[1] >> 8;
3015 	enaddr[4] = myea[2] & 0xff;
3016 	enaddr[5] = myea[2] >> 8;
3017 
3018 	/*
3019 	 * Toggle the LSB of the MAC address on the second port
3020 	 * of some dual port cards.
3021 	 */
3022 	if (do_invert != 0)
3023 		enaddr[5] ^= 1;
3024 
3025 	return 0;
3026 
3027  bad:
3028 	return -1;
3029 }
3030 
3031 /*
3032  * wm_set_ral:
3033  *
3034  *	Set an entery in the receive address list.
3035  */
3036 static void
3037 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3038 {
3039 	uint32_t ral_lo, ral_hi;
3040 
3041 	if (enaddr != NULL) {
3042 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3043 		    (enaddr[3] << 24);
3044 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3045 		ral_hi |= RAL_AV;
3046 	} else {
3047 		ral_lo = 0;
3048 		ral_hi = 0;
3049 	}
3050 
3051 	if (sc->sc_type >= WM_T_82544) {
3052 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3053 		    ral_lo);
3054 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3055 		    ral_hi);
3056 	} else {
3057 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3058 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3059 	}
3060 }
3061 
3062 /*
3063  * wm_mchash:
3064  *
3065  *	Compute the hash of the multicast address for the 4096-bit
3066  *	multicast filter.
3067  */
3068 static uint32_t
3069 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3070 {
3071 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3072 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3073 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3074 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3075 	uint32_t hash;
3076 
3077 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3078 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3079 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3080 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3081 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3082 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3083 		return (hash & 0x3ff);
3084 	}
3085 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3086 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3087 
3088 	return (hash & 0xfff);
3089 }
3090 
3091 /*
3092  * wm_set_filter:
3093  *
3094  *	Set up the receive filter.
3095  */
3096 static void
3097 wm_set_filter(struct wm_softc *sc)
3098 {
3099 	struct ethercom *ec = &sc->sc_ethercom;
3100 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3101 	struct ether_multi *enm;
3102 	struct ether_multistep step;
3103 	bus_addr_t mta_reg;
3104 	uint32_t hash, reg, bit;
3105 	int i, size, ralmax;
3106 
3107 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3108 		device_xname(sc->sc_dev), __func__));
3109 	if (sc->sc_type >= WM_T_82544)
3110 		mta_reg = WMREG_CORDOVA_MTA;
3111 	else
3112 		mta_reg = WMREG_MTA;
3113 
3114 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3115 
3116 	if (ifp->if_flags & IFF_BROADCAST)
3117 		sc->sc_rctl |= RCTL_BAM;
3118 	if (ifp->if_flags & IFF_PROMISC) {
3119 		sc->sc_rctl |= RCTL_UPE;
3120 		goto allmulti;
3121 	}
3122 
3123 	/*
3124 	 * Set the station address in the first RAL slot, and
3125 	 * clear the remaining slots.
3126 	 */
3127 	if (sc->sc_type == WM_T_ICH8)
3128 		size = WM_RAL_TABSIZE_ICH8 -1;
3129 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3130 	    || (sc->sc_type == WM_T_PCH))
3131 		size = WM_RAL_TABSIZE_ICH8;
3132 	else if (sc->sc_type == WM_T_PCH2)
3133 		size = WM_RAL_TABSIZE_PCH2;
3134 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3135 		size = WM_RAL_TABSIZE_PCH_LPT;
3136 	else if (sc->sc_type == WM_T_82575)
3137 		size = WM_RAL_TABSIZE_82575;
3138 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3139 		size = WM_RAL_TABSIZE_82576;
3140 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3141 		size = WM_RAL_TABSIZE_I350;
3142 	else
3143 		size = WM_RAL_TABSIZE;
3144 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3145 
3146 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3147 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3148 		switch (i) {
3149 		case 0:
3150 			/* We can use all entries */
3151 			ralmax = size;
3152 			break;
3153 		case 1:
3154 			/* Only RAR[0] */
3155 			ralmax = 1;
3156 			break;
3157 		default:
3158 			/* available SHRA + RAR[0] */
3159 			ralmax = i + 1;
3160 		}
3161 	} else
3162 		ralmax = size;
3163 	for (i = 1; i < size; i++) {
3164 		if (i < ralmax)
3165 			wm_set_ral(sc, NULL, i);
3166 	}
3167 
3168 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3169 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3170 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3171 	    || (sc->sc_type == WM_T_PCH_SPT))
3172 		size = WM_ICH8_MC_TABSIZE;
3173 	else
3174 		size = WM_MC_TABSIZE;
3175 	/* Clear out the multicast table. */
3176 	for (i = 0; i < size; i++)
3177 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3178 
3179 	ETHER_FIRST_MULTI(step, ec, enm);
3180 	while (enm != NULL) {
3181 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3182 			/*
3183 			 * We must listen to a range of multicast addresses.
3184 			 * For now, just accept all multicasts, rather than
3185 			 * trying to set only those filter bits needed to match
3186 			 * the range.  (At this time, the only use of address
3187 			 * ranges is for IP multicast routing, for which the
3188 			 * range is big enough to require all bits set.)
3189 			 */
3190 			goto allmulti;
3191 		}
3192 
3193 		hash = wm_mchash(sc, enm->enm_addrlo);
3194 
3195 		reg = (hash >> 5);
3196 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3197 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3198 		    || (sc->sc_type == WM_T_PCH2)
3199 		    || (sc->sc_type == WM_T_PCH_LPT)
3200 		    || (sc->sc_type == WM_T_PCH_SPT))
3201 			reg &= 0x1f;
3202 		else
3203 			reg &= 0x7f;
3204 		bit = hash & 0x1f;
3205 
3206 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3207 		hash |= 1U << bit;
3208 
3209 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3210 			/*
3211 			 * 82544 Errata 9: Certain register cannot be written
3212 			 * with particular alignments in PCI-X bus operation
3213 			 * (FCAH, MTA and VFTA).
3214 			 */
3215 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3216 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3217 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3218 		} else
3219 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3220 
3221 		ETHER_NEXT_MULTI(step, enm);
3222 	}
3223 
3224 	ifp->if_flags &= ~IFF_ALLMULTI;
3225 	goto setit;
3226 
3227  allmulti:
3228 	ifp->if_flags |= IFF_ALLMULTI;
3229 	sc->sc_rctl |= RCTL_MPE;
3230 
3231  setit:
3232 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3233 }
3234 
3235 /* Reset and init related */
3236 
3237 static void
3238 wm_set_vlan(struct wm_softc *sc)
3239 {
3240 
3241 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3242 		device_xname(sc->sc_dev), __func__));
3243 	/* Deal with VLAN enables. */
3244 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3245 		sc->sc_ctrl |= CTRL_VME;
3246 	else
3247 		sc->sc_ctrl &= ~CTRL_VME;
3248 
3249 	/* Write the control registers. */
3250 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3251 }
3252 
3253 static void
3254 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3255 {
3256 	uint32_t gcr;
3257 	pcireg_t ctrl2;
3258 
3259 	gcr = CSR_READ(sc, WMREG_GCR);
3260 
3261 	/* Only take action if timeout value is defaulted to 0 */
3262 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3263 		goto out;
3264 
3265 	if ((gcr & GCR_CAP_VER2) == 0) {
3266 		gcr |= GCR_CMPL_TMOUT_10MS;
3267 		goto out;
3268 	}
3269 
3270 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3271 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3272 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3273 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3274 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3275 
3276 out:
3277 	/* Disable completion timeout resend */
3278 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3279 
3280 	CSR_WRITE(sc, WMREG_GCR, gcr);
3281 }
3282 
3283 void
3284 wm_get_auto_rd_done(struct wm_softc *sc)
3285 {
3286 	int i;
3287 
3288 	/* wait for eeprom to reload */
3289 	switch (sc->sc_type) {
3290 	case WM_T_82571:
3291 	case WM_T_82572:
3292 	case WM_T_82573:
3293 	case WM_T_82574:
3294 	case WM_T_82583:
3295 	case WM_T_82575:
3296 	case WM_T_82576:
3297 	case WM_T_82580:
3298 	case WM_T_I350:
3299 	case WM_T_I354:
3300 	case WM_T_I210:
3301 	case WM_T_I211:
3302 	case WM_T_80003:
3303 	case WM_T_ICH8:
3304 	case WM_T_ICH9:
3305 		for (i = 0; i < 10; i++) {
3306 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3307 				break;
3308 			delay(1000);
3309 		}
3310 		if (i == 10) {
3311 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3312 			    "complete\n", device_xname(sc->sc_dev));
3313 		}
3314 		break;
3315 	default:
3316 		break;
3317 	}
3318 }
3319 
3320 void
3321 wm_lan_init_done(struct wm_softc *sc)
3322 {
3323 	uint32_t reg = 0;
3324 	int i;
3325 
3326 	/* wait for eeprom to reload */
3327 	switch (sc->sc_type) {
3328 	case WM_T_ICH10:
3329 	case WM_T_PCH:
3330 	case WM_T_PCH2:
3331 	case WM_T_PCH_LPT:
3332 	case WM_T_PCH_SPT:
3333 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3334 			reg = CSR_READ(sc, WMREG_STATUS);
3335 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3336 				break;
3337 			delay(100);
3338 		}
3339 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3340 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3341 			    "complete\n", device_xname(sc->sc_dev), __func__);
3342 		}
3343 		break;
3344 	default:
3345 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3346 		    __func__);
3347 		break;
3348 	}
3349 
3350 	reg &= ~STATUS_LAN_INIT_DONE;
3351 	CSR_WRITE(sc, WMREG_STATUS, reg);
3352 }
3353 
3354 void
3355 wm_get_cfg_done(struct wm_softc *sc)
3356 {
3357 	int mask;
3358 	uint32_t reg;
3359 	int i;
3360 
3361 	/* wait for eeprom to reload */
3362 	switch (sc->sc_type) {
3363 	case WM_T_82542_2_0:
3364 	case WM_T_82542_2_1:
3365 		/* null */
3366 		break;
3367 	case WM_T_82543:
3368 	case WM_T_82544:
3369 	case WM_T_82540:
3370 	case WM_T_82545:
3371 	case WM_T_82545_3:
3372 	case WM_T_82546:
3373 	case WM_T_82546_3:
3374 	case WM_T_82541:
3375 	case WM_T_82541_2:
3376 	case WM_T_82547:
3377 	case WM_T_82547_2:
3378 	case WM_T_82573:
3379 	case WM_T_82574:
3380 	case WM_T_82583:
3381 		/* generic */
3382 		delay(10*1000);
3383 		break;
3384 	case WM_T_80003:
3385 	case WM_T_82571:
3386 	case WM_T_82572:
3387 	case WM_T_82575:
3388 	case WM_T_82576:
3389 	case WM_T_82580:
3390 	case WM_T_I350:
3391 	case WM_T_I354:
3392 	case WM_T_I210:
3393 	case WM_T_I211:
3394 		if (sc->sc_type == WM_T_82571) {
3395 			/* Only 82571 shares port 0 */
3396 			mask = EEMNGCTL_CFGDONE_0;
3397 		} else
3398 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3399 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3400 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3401 				break;
3402 			delay(1000);
3403 		}
3404 		if (i >= WM_PHY_CFG_TIMEOUT) {
3405 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3406 				device_xname(sc->sc_dev), __func__));
3407 		}
3408 		break;
3409 	case WM_T_ICH8:
3410 	case WM_T_ICH9:
3411 	case WM_T_ICH10:
3412 	case WM_T_PCH:
3413 	case WM_T_PCH2:
3414 	case WM_T_PCH_LPT:
3415 	case WM_T_PCH_SPT:
3416 		delay(10*1000);
3417 		if (sc->sc_type >= WM_T_ICH10)
3418 			wm_lan_init_done(sc);
3419 		else
3420 			wm_get_auto_rd_done(sc);
3421 
3422 		reg = CSR_READ(sc, WMREG_STATUS);
3423 		if ((reg & STATUS_PHYRA) != 0)
3424 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3425 		break;
3426 	default:
3427 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3428 		    __func__);
3429 		break;
3430 	}
3431 }
3432 
3433 /* Init hardware bits */
3434 void
3435 wm_initialize_hardware_bits(struct wm_softc *sc)
3436 {
3437 	uint32_t tarc0, tarc1, reg;
3438 
3439 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3440 		device_xname(sc->sc_dev), __func__));
3441 	/* For 82571 variant, 80003 and ICHs */
3442 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3443 	    || (sc->sc_type >= WM_T_80003)) {
3444 
3445 		/* Transmit Descriptor Control 0 */
3446 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3447 		reg |= TXDCTL_COUNT_DESC;
3448 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3449 
3450 		/* Transmit Descriptor Control 1 */
3451 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3452 		reg |= TXDCTL_COUNT_DESC;
3453 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3454 
3455 		/* TARC0 */
3456 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3457 		switch (sc->sc_type) {
3458 		case WM_T_82571:
3459 		case WM_T_82572:
3460 		case WM_T_82573:
3461 		case WM_T_82574:
3462 		case WM_T_82583:
3463 		case WM_T_80003:
3464 			/* Clear bits 30..27 */
3465 			tarc0 &= ~__BITS(30, 27);
3466 			break;
3467 		default:
3468 			break;
3469 		}
3470 
3471 		switch (sc->sc_type) {
3472 		case WM_T_82571:
3473 		case WM_T_82572:
3474 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3475 
3476 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3477 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3478 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3479 			/* 8257[12] Errata No.7 */
3480 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
3481 
3482 			/* TARC1 bit 28 */
3483 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3484 				tarc1 &= ~__BIT(28);
3485 			else
3486 				tarc1 |= __BIT(28);
3487 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3488 
3489 			/*
3490 			 * 8257[12] Errata No.13
3491 			 * Disable Dyamic Clock Gating.
3492 			 */
3493 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3494 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3495 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3496 			break;
3497 		case WM_T_82573:
3498 		case WM_T_82574:
3499 		case WM_T_82583:
3500 			if ((sc->sc_type == WM_T_82574)
3501 			    || (sc->sc_type == WM_T_82583))
3502 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3503 
3504 			/* Extended Device Control */
3505 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3506 			reg &= ~__BIT(23);	/* Clear bit 23 */
3507 			reg |= __BIT(22);	/* Set bit 22 */
3508 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3509 
3510 			/* Device Control */
3511 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3512 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3513 
3514 			/* PCIe Control Register */
3515 			/*
3516 			 * 82573 Errata (unknown).
3517 			 *
3518 			 * 82574 Errata 25 and 82583 Errata 12
3519 			 * "Dropped Rx Packets":
3520 			 *   NVM Image Version 2.1.4 and newer has no this bug.
3521 			 */
3522 			reg = CSR_READ(sc, WMREG_GCR);
3523 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3524 			CSR_WRITE(sc, WMREG_GCR, reg);
3525 
3526 			if ((sc->sc_type == WM_T_82574)
3527 			    || (sc->sc_type == WM_T_82583)) {
3528 				/*
3529 				 * Document says this bit must be set for
3530 				 * proper operation.
3531 				 */
3532 				reg = CSR_READ(sc, WMREG_GCR);
3533 				reg |= __BIT(22);
3534 				CSR_WRITE(sc, WMREG_GCR, reg);
3535 
3536 				/*
3537 				 * Apply workaround for hardware errata
3538 				 * documented in errata docs Fixes issue where
3539 				 * some error prone or unreliable PCIe
3540 				 * completions are occurring, particularly
3541 				 * with ASPM enabled. Without fix, issue can
3542 				 * cause Tx timeouts.
3543 				 */
3544 				reg = CSR_READ(sc, WMREG_GCR2);
3545 				reg |= __BIT(0);
3546 				CSR_WRITE(sc, WMREG_GCR2, reg);
3547 			}
3548 			break;
3549 		case WM_T_80003:
3550 			/* TARC0 */
3551 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3552 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3553 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
3554 
3555 			/* TARC1 bit 28 */
3556 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3557 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3558 				tarc1 &= ~__BIT(28);
3559 			else
3560 				tarc1 |= __BIT(28);
3561 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3562 			break;
3563 		case WM_T_ICH8:
3564 		case WM_T_ICH9:
3565 		case WM_T_ICH10:
3566 		case WM_T_PCH:
3567 		case WM_T_PCH2:
3568 		case WM_T_PCH_LPT:
3569 		case WM_T_PCH_SPT:
3570 			/* TARC0 */
3571 			if ((sc->sc_type == WM_T_ICH8)
3572 			    || (sc->sc_type == WM_T_PCH_SPT)) {
3573 				/* Set TARC0 bits 29 and 28 */
3574 				tarc0 |= __BITS(29, 28);
3575 			}
3576 			/* Set TARC0 bits 23,24,26,27 */
3577 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3578 
3579 			/* CTRL_EXT */
3580 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3581 			reg |= __BIT(22);	/* Set bit 22 */
3582 			/*
3583 			 * Enable PHY low-power state when MAC is at D3
3584 			 * w/o WoL
3585 			 */
3586 			if (sc->sc_type >= WM_T_PCH)
3587 				reg |= CTRL_EXT_PHYPDEN;
3588 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3589 
3590 			/* TARC1 */
3591 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3592 			/* bit 28 */
3593 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3594 				tarc1 &= ~__BIT(28);
3595 			else
3596 				tarc1 |= __BIT(28);
3597 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3598 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3599 
3600 			/* Device Status */
3601 			if (sc->sc_type == WM_T_ICH8) {
3602 				reg = CSR_READ(sc, WMREG_STATUS);
3603 				reg &= ~__BIT(31);
3604 				CSR_WRITE(sc, WMREG_STATUS, reg);
3605 
3606 			}
3607 
3608 			/* IOSFPC */
3609 			if (sc->sc_type == WM_T_PCH_SPT) {
3610 				reg = CSR_READ(sc, WMREG_IOSFPC);
3611 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
3612 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
3613 			}
3614 			/*
3615 			 * Work-around descriptor data corruption issue during
3616 			 * NFS v2 UDP traffic, just disable the NFS filtering
3617 			 * capability.
3618 			 */
3619 			reg = CSR_READ(sc, WMREG_RFCTL);
3620 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3621 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3622 			break;
3623 		default:
3624 			break;
3625 		}
3626 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3627 
3628 		/*
3629 		 * 8257[12] Errata No.52 and some others.
3630 		 * Avoid RSS Hash Value bug.
3631 		 */
3632 		switch (sc->sc_type) {
3633 		case WM_T_82571:
3634 		case WM_T_82572:
3635 		case WM_T_82573:
3636 		case WM_T_80003:
3637 		case WM_T_ICH8:
3638 			reg = CSR_READ(sc, WMREG_RFCTL);
3639 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3640 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3641 			break;
3642 		default:
3643 			break;
3644 		}
3645 	}
3646 }
3647 
3648 static uint32_t
3649 wm_rxpbs_adjust_82580(uint32_t val)
3650 {
3651 	uint32_t rv = 0;
3652 
3653 	if (val < __arraycount(wm_82580_rxpbs_table))
3654 		rv = wm_82580_rxpbs_table[val];
3655 
3656 	return rv;
3657 }
3658 
3659 /*
3660  * wm_reset:
3661  *
3662  *	Reset the i82542 chip.
3663  */
3664 static void
3665 wm_reset(struct wm_softc *sc)
3666 {
3667 	int phy_reset = 0;
3668 	int i, error = 0;
3669 	uint32_t reg, mask;
3670 
3671 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3672 		device_xname(sc->sc_dev), __func__));
3673 	/*
3674 	 * Allocate on-chip memory according to the MTU size.
3675 	 * The Packet Buffer Allocation register must be written
3676 	 * before the chip is reset.
3677 	 */
3678 	switch (sc->sc_type) {
3679 	case WM_T_82547:
3680 	case WM_T_82547_2:
3681 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3682 		    PBA_22K : PBA_30K;
3683 		for (i = 0; i < sc->sc_nqueues; i++) {
3684 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3685 			txq->txq_fifo_head = 0;
3686 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3687 			txq->txq_fifo_size =
3688 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3689 			txq->txq_fifo_stall = 0;
3690 		}
3691 		break;
3692 	case WM_T_82571:
3693 	case WM_T_82572:
3694 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
3695 	case WM_T_80003:
3696 		sc->sc_pba = PBA_32K;
3697 		break;
3698 	case WM_T_82573:
3699 		sc->sc_pba = PBA_12K;
3700 		break;
3701 	case WM_T_82574:
3702 	case WM_T_82583:
3703 		sc->sc_pba = PBA_20K;
3704 		break;
3705 	case WM_T_82576:
3706 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3707 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3708 		break;
3709 	case WM_T_82580:
3710 	case WM_T_I350:
3711 	case WM_T_I354:
3712 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3713 		break;
3714 	case WM_T_I210:
3715 	case WM_T_I211:
3716 		sc->sc_pba = PBA_34K;
3717 		break;
3718 	case WM_T_ICH8:
3719 		/* Workaround for a bit corruption issue in FIFO memory */
3720 		sc->sc_pba = PBA_8K;
3721 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3722 		break;
3723 	case WM_T_ICH9:
3724 	case WM_T_ICH10:
3725 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3726 		    PBA_14K : PBA_10K;
3727 		break;
3728 	case WM_T_PCH:
3729 	case WM_T_PCH2:
3730 	case WM_T_PCH_LPT:
3731 	case WM_T_PCH_SPT:
3732 		sc->sc_pba = PBA_26K;
3733 		break;
3734 	default:
3735 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3736 		    PBA_40K : PBA_48K;
3737 		break;
3738 	}
3739 	/*
3740 	 * Only old or non-multiqueue devices have the PBA register
3741 	 * XXX Need special handling for 82575.
3742 	 */
3743 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3744 	    || (sc->sc_type == WM_T_82575))
3745 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3746 
3747 	/* Prevent the PCI-E bus from sticking */
3748 	if (sc->sc_flags & WM_F_PCIE) {
3749 		int timeout = 800;
3750 
3751 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3752 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3753 
3754 		while (timeout--) {
3755 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3756 			    == 0)
3757 				break;
3758 			delay(100);
3759 		}
3760 	}
3761 
3762 	/* Set the completion timeout for interface */
3763 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3764 	    || (sc->sc_type == WM_T_82580)
3765 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3766 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3767 		wm_set_pcie_completion_timeout(sc);
3768 
3769 	/* Clear interrupt */
3770 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3771 	if (sc->sc_nintrs > 1) {
3772 		if (sc->sc_type != WM_T_82574) {
3773 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3774 			CSR_WRITE(sc, WMREG_EIAC, 0);
3775 		} else {
3776 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3777 		}
3778 	}
3779 
3780 	/* Stop the transmit and receive processes. */
3781 	CSR_WRITE(sc, WMREG_RCTL, 0);
3782 	sc->sc_rctl &= ~RCTL_EN;
3783 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3784 	CSR_WRITE_FLUSH(sc);
3785 
3786 	/* XXX set_tbi_sbp_82543() */
3787 
3788 	delay(10*1000);
3789 
3790 	/* Must acquire the MDIO ownership before MAC reset */
3791 	switch (sc->sc_type) {
3792 	case WM_T_82573:
3793 	case WM_T_82574:
3794 	case WM_T_82583:
3795 		error = wm_get_hw_semaphore_82573(sc);
3796 		break;
3797 	default:
3798 		break;
3799 	}
3800 
3801 	/*
3802 	 * 82541 Errata 29? & 82547 Errata 28?
3803 	 * See also the description about PHY_RST bit in CTRL register
3804 	 * in 8254x_GBe_SDM.pdf.
3805 	 */
3806 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3807 		CSR_WRITE(sc, WMREG_CTRL,
3808 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3809 		CSR_WRITE_FLUSH(sc);
3810 		delay(5000);
3811 	}
3812 
3813 	switch (sc->sc_type) {
3814 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3815 	case WM_T_82541:
3816 	case WM_T_82541_2:
3817 	case WM_T_82547:
3818 	case WM_T_82547_2:
3819 		/*
3820 		 * On some chipsets, a reset through a memory-mapped write
3821 		 * cycle can cause the chip to reset before completing the
3822 		 * write cycle.  This causes major headache that can be
3823 		 * avoided by issuing the reset via indirect register writes
3824 		 * through I/O space.
3825 		 *
3826 		 * So, if we successfully mapped the I/O BAR at attach time,
3827 		 * use that.  Otherwise, try our luck with a memory-mapped
3828 		 * reset.
3829 		 */
3830 		if (sc->sc_flags & WM_F_IOH_VALID)
3831 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3832 		else
3833 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3834 		break;
3835 	case WM_T_82545_3:
3836 	case WM_T_82546_3:
3837 		/* Use the shadow control register on these chips. */
3838 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3839 		break;
3840 	case WM_T_80003:
3841 		mask = swfwphysem[sc->sc_funcid];
3842 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3843 		wm_get_swfw_semaphore(sc, mask);
3844 		CSR_WRITE(sc, WMREG_CTRL, reg);
3845 		wm_put_swfw_semaphore(sc, mask);
3846 		break;
3847 	case WM_T_ICH8:
3848 	case WM_T_ICH9:
3849 	case WM_T_ICH10:
3850 	case WM_T_PCH:
3851 	case WM_T_PCH2:
3852 	case WM_T_PCH_LPT:
3853 	case WM_T_PCH_SPT:
3854 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3855 		if (wm_phy_resetisblocked(sc) == false) {
3856 			/*
3857 			 * Gate automatic PHY configuration by hardware on
3858 			 * non-managed 82579
3859 			 */
3860 			if ((sc->sc_type == WM_T_PCH2)
3861 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3862 				== 0))
3863 				wm_gate_hw_phy_config_ich8lan(sc, true);
3864 
3865 			reg |= CTRL_PHY_RESET;
3866 			phy_reset = 1;
3867 		} else
3868 			printf("XXX reset is blocked!!!\n");
3869 		wm_get_swfwhw_semaphore(sc);
3870 		CSR_WRITE(sc, WMREG_CTRL, reg);
3871 		/* Don't insert a completion barrier when reset */
3872 		delay(20*1000);
3873 		wm_put_swfwhw_semaphore(sc);
3874 		break;
3875 	case WM_T_82580:
3876 	case WM_T_I350:
3877 	case WM_T_I354:
3878 	case WM_T_I210:
3879 	case WM_T_I211:
3880 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3881 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3882 			CSR_WRITE_FLUSH(sc);
3883 		delay(5000);
3884 		break;
3885 	case WM_T_82542_2_0:
3886 	case WM_T_82542_2_1:
3887 	case WM_T_82543:
3888 	case WM_T_82540:
3889 	case WM_T_82545:
3890 	case WM_T_82546:
3891 	case WM_T_82571:
3892 	case WM_T_82572:
3893 	case WM_T_82573:
3894 	case WM_T_82574:
3895 	case WM_T_82575:
3896 	case WM_T_82576:
3897 	case WM_T_82583:
3898 	default:
3899 		/* Everything else can safely use the documented method. */
3900 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3901 		break;
3902 	}
3903 
3904 	/* Must release the MDIO ownership after MAC reset */
3905 	switch (sc->sc_type) {
3906 	case WM_T_82573:
3907 	case WM_T_82574:
3908 	case WM_T_82583:
3909 		if (error == 0)
3910 			wm_put_hw_semaphore_82573(sc);
3911 		break;
3912 	default:
3913 		break;
3914 	}
3915 
3916 	if (phy_reset != 0)
3917 		wm_get_cfg_done(sc);
3918 
3919 	/* reload EEPROM */
3920 	switch (sc->sc_type) {
3921 	case WM_T_82542_2_0:
3922 	case WM_T_82542_2_1:
3923 	case WM_T_82543:
3924 	case WM_T_82544:
3925 		delay(10);
3926 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3927 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3928 		CSR_WRITE_FLUSH(sc);
3929 		delay(2000);
3930 		break;
3931 	case WM_T_82540:
3932 	case WM_T_82545:
3933 	case WM_T_82545_3:
3934 	case WM_T_82546:
3935 	case WM_T_82546_3:
3936 		delay(5*1000);
3937 		/* XXX Disable HW ARPs on ASF enabled adapters */
3938 		break;
3939 	case WM_T_82541:
3940 	case WM_T_82541_2:
3941 	case WM_T_82547:
3942 	case WM_T_82547_2:
3943 		delay(20000);
3944 		/* XXX Disable HW ARPs on ASF enabled adapters */
3945 		break;
3946 	case WM_T_82571:
3947 	case WM_T_82572:
3948 	case WM_T_82573:
3949 	case WM_T_82574:
3950 	case WM_T_82583:
3951 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
3952 			delay(10);
3953 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
3954 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3955 			CSR_WRITE_FLUSH(sc);
3956 		}
3957 		/* check EECD_EE_AUTORD */
3958 		wm_get_auto_rd_done(sc);
3959 		/*
3960 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
3961 		 * is set.
3962 		 */
3963 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
3964 		    || (sc->sc_type == WM_T_82583))
3965 			delay(25*1000);
3966 		break;
3967 	case WM_T_82575:
3968 	case WM_T_82576:
3969 	case WM_T_82580:
3970 	case WM_T_I350:
3971 	case WM_T_I354:
3972 	case WM_T_I210:
3973 	case WM_T_I211:
3974 	case WM_T_80003:
3975 		/* check EECD_EE_AUTORD */
3976 		wm_get_auto_rd_done(sc);
3977 		break;
3978 	case WM_T_ICH8:
3979 	case WM_T_ICH9:
3980 	case WM_T_ICH10:
3981 	case WM_T_PCH:
3982 	case WM_T_PCH2:
3983 	case WM_T_PCH_LPT:
3984 	case WM_T_PCH_SPT:
3985 		break;
3986 	default:
3987 		panic("%s: unknown type\n", __func__);
3988 	}
3989 
3990 	/* Check whether EEPROM is present or not */
3991 	switch (sc->sc_type) {
3992 	case WM_T_82575:
3993 	case WM_T_82576:
3994 	case WM_T_82580:
3995 	case WM_T_I350:
3996 	case WM_T_I354:
3997 	case WM_T_ICH8:
3998 	case WM_T_ICH9:
3999 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4000 			/* Not found */
4001 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4002 			if (sc->sc_type == WM_T_82575)
4003 				wm_reset_init_script_82575(sc);
4004 		}
4005 		break;
4006 	default:
4007 		break;
4008 	}
4009 
4010 	if ((sc->sc_type == WM_T_82580)
4011 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4012 		/* clear global device reset status bit */
4013 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4014 	}
4015 
4016 	/* Clear any pending interrupt events. */
4017 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4018 	reg = CSR_READ(sc, WMREG_ICR);
4019 	if (sc->sc_nintrs > 1) {
4020 		if (sc->sc_type != WM_T_82574) {
4021 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4022 			CSR_WRITE(sc, WMREG_EIAC, 0);
4023 		} else
4024 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4025 	}
4026 
4027 	/* reload sc_ctrl */
4028 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4029 
4030 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4031 		wm_set_eee_i350(sc);
4032 
4033 	/* dummy read from WUC */
4034 	if (sc->sc_type == WM_T_PCH)
4035 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4036 	/*
4037 	 * For PCH, this write will make sure that any noise will be detected
4038 	 * as a CRC error and be dropped rather than show up as a bad packet
4039 	 * to the DMA engine
4040 	 */
4041 	if (sc->sc_type == WM_T_PCH)
4042 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4043 
4044 	if (sc->sc_type >= WM_T_82544)
4045 		CSR_WRITE(sc, WMREG_WUC, 0);
4046 
4047 	wm_reset_mdicnfg_82580(sc);
4048 
4049 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4050 		wm_pll_workaround_i210(sc);
4051 }
4052 
4053 /*
4054  * wm_add_rxbuf:
4055  *
4056  *	Add a receive buffer to the indiciated descriptor.
4057  */
4058 static int
4059 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4060 {
4061 	struct wm_softc *sc = rxq->rxq_sc;
4062 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4063 	struct mbuf *m;
4064 	int error;
4065 
4066 	KASSERT(mutex_owned(rxq->rxq_lock));
4067 
4068 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4069 	if (m == NULL)
4070 		return ENOBUFS;
4071 
4072 	MCLGET(m, M_DONTWAIT);
4073 	if ((m->m_flags & M_EXT) == 0) {
4074 		m_freem(m);
4075 		return ENOBUFS;
4076 	}
4077 
4078 	if (rxs->rxs_mbuf != NULL)
4079 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4080 
4081 	rxs->rxs_mbuf = m;
4082 
4083 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4084 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4085 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4086 	if (error) {
4087 		/* XXX XXX XXX */
4088 		aprint_error_dev(sc->sc_dev,
4089 		    "unable to load rx DMA map %d, error = %d\n",
4090 		    idx, error);
4091 		panic("wm_add_rxbuf");
4092 	}
4093 
4094 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4095 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4096 
4097 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4098 		if ((sc->sc_rctl & RCTL_EN) != 0)
4099 			wm_init_rxdesc(rxq, idx);
4100 	} else
4101 		wm_init_rxdesc(rxq, idx);
4102 
4103 	return 0;
4104 }
4105 
4106 /*
4107  * wm_rxdrain:
4108  *
4109  *	Drain the receive queue.
4110  */
4111 static void
4112 wm_rxdrain(struct wm_rxqueue *rxq)
4113 {
4114 	struct wm_softc *sc = rxq->rxq_sc;
4115 	struct wm_rxsoft *rxs;
4116 	int i;
4117 
4118 	KASSERT(mutex_owned(rxq->rxq_lock));
4119 
4120 	for (i = 0; i < WM_NRXDESC; i++) {
4121 		rxs = &rxq->rxq_soft[i];
4122 		if (rxs->rxs_mbuf != NULL) {
4123 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4124 			m_freem(rxs->rxs_mbuf);
4125 			rxs->rxs_mbuf = NULL;
4126 		}
4127 	}
4128 }
4129 
4130 
4131 /*
4132  * XXX copy from FreeBSD's sys/net/rss_config.c
4133  */
4134 /*
4135  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4136  * effectiveness may be limited by algorithm choice and available entropy
4137  * during the boot.
4138  *
4139  * XXXRW: And that we don't randomize it yet!
4140  *
4141  * This is the default Microsoft RSS specification key which is also
4142  * the Chelsio T5 firmware default key.
4143  */
4144 #define RSS_KEYSIZE 40
4145 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4146 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4147 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4148 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4149 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4150 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4151 };
4152 
4153 /*
4154  * Caller must pass an array of size sizeof(rss_key).
4155  *
4156  * XXX
4157  * As if_ixgbe may use this function, this function should not be
4158  * if_wm specific function.
4159  */
4160 static void
4161 wm_rss_getkey(uint8_t *key)
4162 {
4163 
4164 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4165 }
4166 
4167 /*
4168  * Setup registers for RSS.
4169  *
4170  * XXX not yet VMDq support
4171  */
4172 static void
4173 wm_init_rss(struct wm_softc *sc)
4174 {
4175 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4176 	int i;
4177 
4178 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4179 
4180 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4181 		int qid, reta_ent;
4182 
4183 		qid  = i % sc->sc_nqueues;
4184 		switch(sc->sc_type) {
4185 		case WM_T_82574:
4186 			reta_ent = __SHIFTIN(qid,
4187 			    RETA_ENT_QINDEX_MASK_82574);
4188 			break;
4189 		case WM_T_82575:
4190 			reta_ent = __SHIFTIN(qid,
4191 			    RETA_ENT_QINDEX1_MASK_82575);
4192 			break;
4193 		default:
4194 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4195 			break;
4196 		}
4197 
4198 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4199 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4200 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4201 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4202 	}
4203 
4204 	wm_rss_getkey((uint8_t *)rss_key);
4205 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4206 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4207 
4208 	if (sc->sc_type == WM_T_82574)
4209 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4210 	else
4211 		mrqc = MRQC_ENABLE_RSS_MQ;
4212 
4213 	/* XXXX
4214 	 * The same as FreeBSD igb.
4215 	 * Why doesn't use MRQC_RSS_FIELD_IPV6_EX?
4216 	 */
4217 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4218 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4219 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4220 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4221 
4222 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4223 }
4224 
4225 /*
4226  * Adjust TX and RX queue numbers which the system actulally uses.
4227  *
4228  * The numbers are affected by below parameters.
4229  *     - The nubmer of hardware queues
4230  *     - The number of MSI-X vectors (= "nvectors" argument)
4231  *     - ncpu
4232  */
4233 static void
4234 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4235 {
4236 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4237 
4238 	if (nvectors < 2) {
4239 		sc->sc_nqueues = 1;
4240 		return;
4241 	}
4242 
4243 	switch(sc->sc_type) {
4244 	case WM_T_82572:
4245 		hw_ntxqueues = 2;
4246 		hw_nrxqueues = 2;
4247 		break;
4248 	case WM_T_82574:
4249 		hw_ntxqueues = 2;
4250 		hw_nrxqueues = 2;
4251 		break;
4252 	case WM_T_82575:
4253 		hw_ntxqueues = 4;
4254 		hw_nrxqueues = 4;
4255 		break;
4256 	case WM_T_82576:
4257 		hw_ntxqueues = 16;
4258 		hw_nrxqueues = 16;
4259 		break;
4260 	case WM_T_82580:
4261 	case WM_T_I350:
4262 	case WM_T_I354:
4263 		hw_ntxqueues = 8;
4264 		hw_nrxqueues = 8;
4265 		break;
4266 	case WM_T_I210:
4267 		hw_ntxqueues = 4;
4268 		hw_nrxqueues = 4;
4269 		break;
4270 	case WM_T_I211:
4271 		hw_ntxqueues = 2;
4272 		hw_nrxqueues = 2;
4273 		break;
4274 		/*
4275 		 * As below ethernet controllers does not support MSI-X,
4276 		 * this driver let them not use multiqueue.
4277 		 *     - WM_T_80003
4278 		 *     - WM_T_ICH8
4279 		 *     - WM_T_ICH9
4280 		 *     - WM_T_ICH10
4281 		 *     - WM_T_PCH
4282 		 *     - WM_T_PCH2
4283 		 *     - WM_T_PCH_LPT
4284 		 */
4285 	default:
4286 		hw_ntxqueues = 1;
4287 		hw_nrxqueues = 1;
4288 		break;
4289 	}
4290 
4291 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4292 
4293 	/*
4294 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
4295 	 * the number of queues used actually.
4296 	 */
4297 	if (nvectors < hw_nqueues + 1) {
4298 		sc->sc_nqueues = nvectors - 1;
4299 	} else {
4300 		sc->sc_nqueues = hw_nqueues;
4301 	}
4302 
4303 	/*
4304 	 * As queues more then cpus cannot improve scaling, we limit
4305 	 * the number of queues used actually.
4306 	 */
4307 	if (ncpu < sc->sc_nqueues)
4308 		sc->sc_nqueues = ncpu;
4309 }
4310 
4311 /*
4312  * Both single interrupt MSI and INTx can use this function.
4313  */
4314 static int
4315 wm_setup_legacy(struct wm_softc *sc)
4316 {
4317 	pci_chipset_tag_t pc = sc->sc_pc;
4318 	const char *intrstr = NULL;
4319 	char intrbuf[PCI_INTRSTR_LEN];
4320 	int error;
4321 
4322 	error = wm_alloc_txrx_queues(sc);
4323 	if (error) {
4324 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4325 		    error);
4326 		return ENOMEM;
4327 	}
4328 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4329 	    sizeof(intrbuf));
4330 #ifdef WM_MPSAFE
4331 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4332 #endif
4333 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4334 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4335 	if (sc->sc_ihs[0] == NULL) {
4336 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4337 		    (pci_intr_type(pc, sc->sc_intrs[0])
4338 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4339 		return ENOMEM;
4340 	}
4341 
4342 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4343 	sc->sc_nintrs = 1;
4344 	return 0;
4345 }
4346 
4347 static int
4348 wm_setup_msix(struct wm_softc *sc)
4349 {
4350 	void *vih;
4351 	kcpuset_t *affinity;
4352 	int qidx, error, intr_idx, txrx_established;
4353 	pci_chipset_tag_t pc = sc->sc_pc;
4354 	const char *intrstr = NULL;
4355 	char intrbuf[PCI_INTRSTR_LEN];
4356 	char intr_xname[INTRDEVNAMEBUF];
4357 
4358 	if (sc->sc_nqueues < ncpu) {
4359 		/*
4360 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
4361 		 * interrupts start from CPU#1.
4362 		 */
4363 		sc->sc_affinity_offset = 1;
4364 	} else {
4365 		/*
4366 		 * In this case, this device use all CPUs. So, we unify
4367 		 * affinitied cpu_index to msix vector number for readability.
4368 		 */
4369 		sc->sc_affinity_offset = 0;
4370 	}
4371 
4372 	error = wm_alloc_txrx_queues(sc);
4373 	if (error) {
4374 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4375 		    error);
4376 		return ENOMEM;
4377 	}
4378 
4379 	kcpuset_create(&affinity, false);
4380 	intr_idx = 0;
4381 
4382 	/*
4383 	 * TX and RX
4384 	 */
4385 	txrx_established = 0;
4386 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4387 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4388 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4389 
4390 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4391 		    sizeof(intrbuf));
4392 #ifdef WM_MPSAFE
4393 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4394 		    PCI_INTR_MPSAFE, true);
4395 #endif
4396 		memset(intr_xname, 0, sizeof(intr_xname));
4397 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4398 		    device_xname(sc->sc_dev), qidx);
4399 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4400 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4401 		if (vih == NULL) {
4402 			aprint_error_dev(sc->sc_dev,
4403 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
4404 			    intrstr ? " at " : "",
4405 			    intrstr ? intrstr : "");
4406 
4407 			goto fail;
4408 		}
4409 		kcpuset_zero(affinity);
4410 		/* Round-robin affinity */
4411 		kcpuset_set(affinity, affinity_to);
4412 		error = interrupt_distribute(vih, affinity, NULL);
4413 		if (error == 0) {
4414 			aprint_normal_dev(sc->sc_dev,
4415 			    "for TX and RX interrupting at %s affinity to %u\n",
4416 			    intrstr, affinity_to);
4417 		} else {
4418 			aprint_normal_dev(sc->sc_dev,
4419 			    "for TX and RX interrupting at %s\n", intrstr);
4420 		}
4421 		sc->sc_ihs[intr_idx] = vih;
4422 		wmq->wmq_id= qidx;
4423 		wmq->wmq_intr_idx = intr_idx;
4424 
4425 		txrx_established++;
4426 		intr_idx++;
4427 	}
4428 
4429 	/*
4430 	 * LINK
4431 	 */
4432 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4433 	    sizeof(intrbuf));
4434 #ifdef WM_MPSAFE
4435 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4436 #endif
4437 	memset(intr_xname, 0, sizeof(intr_xname));
4438 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4439 	    device_xname(sc->sc_dev));
4440 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4441 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4442 	if (vih == NULL) {
4443 		aprint_error_dev(sc->sc_dev,
4444 		    "unable to establish MSI-X(for LINK)%s%s\n",
4445 		    intrstr ? " at " : "",
4446 		    intrstr ? intrstr : "");
4447 
4448 		goto fail;
4449 	}
4450 	/* keep default affinity to LINK interrupt */
4451 	aprint_normal_dev(sc->sc_dev,
4452 	    "for LINK interrupting at %s\n", intrstr);
4453 	sc->sc_ihs[intr_idx] = vih;
4454 	sc->sc_link_intr_idx = intr_idx;
4455 
4456 	sc->sc_nintrs = sc->sc_nqueues + 1;
4457 	kcpuset_destroy(affinity);
4458 	return 0;
4459 
4460  fail:
4461 	for (qidx = 0; qidx < txrx_established; qidx++) {
4462 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4463 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4464 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4465 	}
4466 
4467 	kcpuset_destroy(affinity);
4468 	return ENOMEM;
4469 }
4470 
4471 /*
4472  * wm_init:		[ifnet interface function]
4473  *
4474  *	Initialize the interface.
4475  */
4476 static int
4477 wm_init(struct ifnet *ifp)
4478 {
4479 	struct wm_softc *sc = ifp->if_softc;
4480 	int ret;
4481 
4482 	WM_CORE_LOCK(sc);
4483 	ret = wm_init_locked(ifp);
4484 	WM_CORE_UNLOCK(sc);
4485 
4486 	return ret;
4487 }
4488 
4489 static int
4490 wm_init_locked(struct ifnet *ifp)
4491 {
4492 	struct wm_softc *sc = ifp->if_softc;
4493 	int i, j, trynum, error = 0;
4494 	uint32_t reg;
4495 
4496 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4497 		device_xname(sc->sc_dev), __func__));
4498 	KASSERT(WM_CORE_LOCKED(sc));
4499 	/*
4500 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
4501 	 * There is a small but measurable benefit to avoiding the adjusment
4502 	 * of the descriptor so that the headers are aligned, for normal mtu,
4503 	 * on such platforms.  One possibility is that the DMA itself is
4504 	 * slightly more efficient if the front of the entire packet (instead
4505 	 * of the front of the headers) is aligned.
4506 	 *
4507 	 * Note we must always set align_tweak to 0 if we are using
4508 	 * jumbo frames.
4509 	 */
4510 #ifdef __NO_STRICT_ALIGNMENT
4511 	sc->sc_align_tweak = 0;
4512 #else
4513 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4514 		sc->sc_align_tweak = 0;
4515 	else
4516 		sc->sc_align_tweak = 2;
4517 #endif /* __NO_STRICT_ALIGNMENT */
4518 
4519 	/* Cancel any pending I/O. */
4520 	wm_stop_locked(ifp, 0);
4521 
4522 	/* update statistics before reset */
4523 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4524 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4525 
4526 	/* Reset the chip to a known state. */
4527 	wm_reset(sc);
4528 
4529 	switch (sc->sc_type) {
4530 	case WM_T_82571:
4531 	case WM_T_82572:
4532 	case WM_T_82573:
4533 	case WM_T_82574:
4534 	case WM_T_82583:
4535 	case WM_T_80003:
4536 	case WM_T_ICH8:
4537 	case WM_T_ICH9:
4538 	case WM_T_ICH10:
4539 	case WM_T_PCH:
4540 	case WM_T_PCH2:
4541 	case WM_T_PCH_LPT:
4542 	case WM_T_PCH_SPT:
4543 		/* AMT based hardware can now take control from firmware */
4544 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4545 			wm_get_hw_control(sc);
4546 		break;
4547 	default:
4548 		break;
4549 	}
4550 
4551 	/* Init hardware bits */
4552 	wm_initialize_hardware_bits(sc);
4553 
4554 	/* Reset the PHY. */
4555 	if (sc->sc_flags & WM_F_HAS_MII)
4556 		wm_gmii_reset(sc);
4557 
4558 	/* Calculate (E)ITR value */
4559 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4560 		sc->sc_itr = 450;	/* For EITR */
4561 	} else if (sc->sc_type >= WM_T_82543) {
4562 		/*
4563 		 * Set up the interrupt throttling register (units of 256ns)
4564 		 * Note that a footnote in Intel's documentation says this
4565 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4566 		 * or 10Mbit mode.  Empirically, it appears to be the case
4567 		 * that that is also true for the 1024ns units of the other
4568 		 * interrupt-related timer registers -- so, really, we ought
4569 		 * to divide this value by 4 when the link speed is low.
4570 		 *
4571 		 * XXX implement this division at link speed change!
4572 		 */
4573 
4574 		/*
4575 		 * For N interrupts/sec, set this value to:
4576 		 * 1000000000 / (N * 256).  Note that we set the
4577 		 * absolute and packet timer values to this value
4578 		 * divided by 4 to get "simple timer" behavior.
4579 		 */
4580 
4581 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4582 	}
4583 
4584 	error = wm_init_txrx_queues(sc);
4585 	if (error)
4586 		goto out;
4587 
4588 	/*
4589 	 * Clear out the VLAN table -- we don't use it (yet).
4590 	 */
4591 	CSR_WRITE(sc, WMREG_VET, 0);
4592 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4593 		trynum = 10; /* Due to hw errata */
4594 	else
4595 		trynum = 1;
4596 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4597 		for (j = 0; j < trynum; j++)
4598 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4599 
4600 	/*
4601 	 * Set up flow-control parameters.
4602 	 *
4603 	 * XXX Values could probably stand some tuning.
4604 	 */
4605 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4606 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4607 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4608 	    && (sc->sc_type != WM_T_PCH_SPT)) {
4609 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4610 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4611 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4612 	}
4613 
4614 	sc->sc_fcrtl = FCRTL_DFLT;
4615 	if (sc->sc_type < WM_T_82543) {
4616 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4617 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4618 	} else {
4619 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4620 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4621 	}
4622 
4623 	if (sc->sc_type == WM_T_80003)
4624 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4625 	else
4626 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4627 
4628 	/* Writes the control register. */
4629 	wm_set_vlan(sc);
4630 
4631 	if (sc->sc_flags & WM_F_HAS_MII) {
4632 		int val;
4633 
4634 		switch (sc->sc_type) {
4635 		case WM_T_80003:
4636 		case WM_T_ICH8:
4637 		case WM_T_ICH9:
4638 		case WM_T_ICH10:
4639 		case WM_T_PCH:
4640 		case WM_T_PCH2:
4641 		case WM_T_PCH_LPT:
4642 		case WM_T_PCH_SPT:
4643 			/*
4644 			 * Set the mac to wait the maximum time between each
4645 			 * iteration and increase the max iterations when
4646 			 * polling the phy; this fixes erroneous timeouts at
4647 			 * 10Mbps.
4648 			 */
4649 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4650 			    0xFFFF);
4651 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4652 			val |= 0x3F;
4653 			wm_kmrn_writereg(sc,
4654 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4655 			break;
4656 		default:
4657 			break;
4658 		}
4659 
4660 		if (sc->sc_type == WM_T_80003) {
4661 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4662 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4663 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4664 
4665 			/* Bypass RX and TX FIFO's */
4666 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4667 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4668 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4669 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4670 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4671 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4672 		}
4673 	}
4674 #if 0
4675 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4676 #endif
4677 
4678 	/* Set up checksum offload parameters. */
4679 	reg = CSR_READ(sc, WMREG_RXCSUM);
4680 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4681 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4682 		reg |= RXCSUM_IPOFL;
4683 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4684 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4685 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4686 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4687 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4688 
4689 	/* Set up MSI-X */
4690 	if (sc->sc_nintrs > 1) {
4691 		uint32_t ivar;
4692 		struct wm_queue *wmq;
4693 		int qid, qintr_idx;
4694 
4695 		if (sc->sc_type == WM_T_82575) {
4696 			/* Interrupt control */
4697 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4698 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4699 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4700 
4701 			/* TX and RX */
4702 			for (i = 0; i < sc->sc_nqueues; i++) {
4703 				wmq = &sc->sc_queue[i];
4704 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4705 				    EITR_TX_QUEUE(wmq->wmq_id)
4706 				    | EITR_RX_QUEUE(wmq->wmq_id));
4707 			}
4708 			/* Link status */
4709 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4710 			    EITR_OTHER);
4711 		} else if (sc->sc_type == WM_T_82574) {
4712 			/* Interrupt control */
4713 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4714 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4715 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4716 
4717 			ivar = 0;
4718 			/* TX and RX */
4719 			for (i = 0; i < sc->sc_nqueues; i++) {
4720 				wmq = &sc->sc_queue[i];
4721 				qid = wmq->wmq_id;
4722 				qintr_idx = wmq->wmq_intr_idx;
4723 
4724 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4725 				    IVAR_TX_MASK_Q_82574(qid));
4726 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4727 				    IVAR_RX_MASK_Q_82574(qid));
4728 			}
4729 			/* Link status */
4730 			ivar |= __SHIFTIN((IVAR_VALID_82574
4731 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4732 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4733 		} else {
4734 			/* Interrupt control */
4735 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4736 			    | GPIE_EIAME | GPIE_PBA);
4737 
4738 			switch (sc->sc_type) {
4739 			case WM_T_82580:
4740 			case WM_T_I350:
4741 			case WM_T_I354:
4742 			case WM_T_I210:
4743 			case WM_T_I211:
4744 				/* TX and RX */
4745 				for (i = 0; i < sc->sc_nqueues; i++) {
4746 					wmq = &sc->sc_queue[i];
4747 					qid = wmq->wmq_id;
4748 					qintr_idx = wmq->wmq_intr_idx;
4749 
4750 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4751 					ivar &= ~IVAR_TX_MASK_Q(qid);
4752 					ivar |= __SHIFTIN((qintr_idx
4753 						| IVAR_VALID),
4754 					    IVAR_TX_MASK_Q(qid));
4755 					ivar &= ~IVAR_RX_MASK_Q(qid);
4756 					ivar |= __SHIFTIN((qintr_idx
4757 						| IVAR_VALID),
4758 					    IVAR_RX_MASK_Q(qid));
4759 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4760 				}
4761 				break;
4762 			case WM_T_82576:
4763 				/* TX and RX */
4764 				for (i = 0; i < sc->sc_nqueues; i++) {
4765 					wmq = &sc->sc_queue[i];
4766 					qid = wmq->wmq_id;
4767 					qintr_idx = wmq->wmq_intr_idx;
4768 
4769 					ivar = CSR_READ(sc,
4770 					    WMREG_IVAR_Q_82576(qid));
4771 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4772 					ivar |= __SHIFTIN((qintr_idx
4773 						| IVAR_VALID),
4774 					    IVAR_TX_MASK_Q_82576(qid));
4775 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4776 					ivar |= __SHIFTIN((qintr_idx
4777 						| IVAR_VALID),
4778 					    IVAR_RX_MASK_Q_82576(qid));
4779 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4780 					    ivar);
4781 				}
4782 				break;
4783 			default:
4784 				break;
4785 			}
4786 
4787 			/* Link status */
4788 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4789 			    IVAR_MISC_OTHER);
4790 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4791 		}
4792 
4793 		if (sc->sc_nqueues > 1) {
4794 			wm_init_rss(sc);
4795 
4796 			/*
4797 			** NOTE: Receive Full-Packet Checksum Offload
4798 			** is mutually exclusive with Multiqueue. However
4799 			** this is not the same as TCP/IP checksums which
4800 			** still work.
4801 			*/
4802 			reg = CSR_READ(sc, WMREG_RXCSUM);
4803 			reg |= RXCSUM_PCSD;
4804 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
4805 		}
4806 	}
4807 
4808 	/* Set up the interrupt registers. */
4809 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4810 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4811 	    ICR_RXO | ICR_RXT0;
4812 	if (sc->sc_nintrs > 1) {
4813 		uint32_t mask;
4814 		struct wm_queue *wmq;
4815 
4816 		switch (sc->sc_type) {
4817 		case WM_T_82574:
4818 			CSR_WRITE(sc, WMREG_EIAC_82574,
4819 			    WMREG_EIAC_82574_MSIX_MASK);
4820 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4821 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4822 			break;
4823 		default:
4824 			if (sc->sc_type == WM_T_82575) {
4825 				mask = 0;
4826 				for (i = 0; i < sc->sc_nqueues; i++) {
4827 					wmq = &sc->sc_queue[i];
4828 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
4829 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
4830 				}
4831 				mask |= EITR_OTHER;
4832 			} else {
4833 				mask = 0;
4834 				for (i = 0; i < sc->sc_nqueues; i++) {
4835 					wmq = &sc->sc_queue[i];
4836 					mask |= 1 << wmq->wmq_intr_idx;
4837 				}
4838 				mask |= 1 << sc->sc_link_intr_idx;
4839 			}
4840 			CSR_WRITE(sc, WMREG_EIAC, mask);
4841 			CSR_WRITE(sc, WMREG_EIAM, mask);
4842 			CSR_WRITE(sc, WMREG_EIMS, mask);
4843 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4844 			break;
4845 		}
4846 	} else
4847 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4848 
4849 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4850 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4851 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4852 	    || (sc->sc_type == WM_T_PCH_SPT)) {
4853 		reg = CSR_READ(sc, WMREG_KABGTXD);
4854 		reg |= KABGTXD_BGSQLBIAS;
4855 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4856 	}
4857 
4858 	/* Set up the inter-packet gap. */
4859 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4860 
4861 	if (sc->sc_type >= WM_T_82543) {
4862 		/*
4863 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
4864 		 * the multi queue function with MSI-X.
4865 		 */
4866 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4867 			int qidx;
4868 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4869 				struct wm_queue *wmq = &sc->sc_queue[qidx];
4870 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
4871 				    sc->sc_itr);
4872 			}
4873 			/*
4874 			 * Link interrupts occur much less than TX
4875 			 * interrupts and RX interrupts. So, we don't
4876 			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
4877 			 * FreeBSD's if_igb.
4878 			 */
4879 		} else
4880 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4881 	}
4882 
4883 	/* Set the VLAN ethernetype. */
4884 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4885 
4886 	/*
4887 	 * Set up the transmit control register; we start out with
4888 	 * a collision distance suitable for FDX, but update it whe
4889 	 * we resolve the media type.
4890 	 */
4891 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4892 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4893 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4894 	if (sc->sc_type >= WM_T_82571)
4895 		sc->sc_tctl |= TCTL_MULR;
4896 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4897 
4898 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4899 		/* Write TDT after TCTL.EN is set. See the document. */
4900 		CSR_WRITE(sc, WMREG_TDT(0), 0);
4901 	}
4902 
4903 	if (sc->sc_type == WM_T_80003) {
4904 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4905 		reg &= ~TCTL_EXT_GCEX_MASK;
4906 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4907 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4908 	}
4909 
4910 	/* Set the media. */
4911 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4912 		goto out;
4913 
4914 	/* Configure for OS presence */
4915 	wm_init_manageability(sc);
4916 
4917 	/*
4918 	 * Set up the receive control register; we actually program
4919 	 * the register when we set the receive filter.  Use multicast
4920 	 * address offset type 0.
4921 	 *
4922 	 * Only the i82544 has the ability to strip the incoming
4923 	 * CRC, so we don't enable that feature.
4924 	 */
4925 	sc->sc_mchash_type = 0;
4926 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
4927 	    | RCTL_MO(sc->sc_mchash_type);
4928 
4929 	/*
4930 	 * The I350 has a bug where it always strips the CRC whether
4931 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
4932 	 */
4933 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4934 	    || (sc->sc_type == WM_T_I210))
4935 		sc->sc_rctl |= RCTL_SECRC;
4936 
4937 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
4938 	    && (ifp->if_mtu > ETHERMTU)) {
4939 		sc->sc_rctl |= RCTL_LPE;
4940 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
4941 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
4942 	}
4943 
4944 	if (MCLBYTES == 2048) {
4945 		sc->sc_rctl |= RCTL_2k;
4946 	} else {
4947 		if (sc->sc_type >= WM_T_82543) {
4948 			switch (MCLBYTES) {
4949 			case 4096:
4950 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
4951 				break;
4952 			case 8192:
4953 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
4954 				break;
4955 			case 16384:
4956 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
4957 				break;
4958 			default:
4959 				panic("wm_init: MCLBYTES %d unsupported",
4960 				    MCLBYTES);
4961 				break;
4962 			}
4963 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
4964 	}
4965 
4966 	/* Set the receive filter. */
4967 	wm_set_filter(sc);
4968 
4969 	/* Enable ECC */
4970 	switch (sc->sc_type) {
4971 	case WM_T_82571:
4972 		reg = CSR_READ(sc, WMREG_PBA_ECC);
4973 		reg |= PBA_ECC_CORR_EN;
4974 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
4975 		break;
4976 	case WM_T_PCH_LPT:
4977 	case WM_T_PCH_SPT:
4978 		reg = CSR_READ(sc, WMREG_PBECCSTS);
4979 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
4980 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
4981 
4982 		reg = CSR_READ(sc, WMREG_CTRL);
4983 		reg |= CTRL_MEHE;
4984 		CSR_WRITE(sc, WMREG_CTRL, reg);
4985 		break;
4986 	default:
4987 		break;
4988 	}
4989 
4990 	/* On 575 and later set RDT only if RX enabled */
4991 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4992 		int qidx;
4993 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4994 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
4995 			for (i = 0; i < WM_NRXDESC; i++) {
4996 				mutex_enter(rxq->rxq_lock);
4997 				wm_init_rxdesc(rxq, i);
4998 				mutex_exit(rxq->rxq_lock);
4999 
5000 			}
5001 		}
5002 	}
5003 
5004 	sc->sc_stopping = false;
5005 
5006 	/* Start the one second link check clock. */
5007 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5008 
5009 	/* ...all done! */
5010 	ifp->if_flags |= IFF_RUNNING;
5011 	ifp->if_flags &= ~IFF_OACTIVE;
5012 
5013  out:
5014 	sc->sc_if_flags = ifp->if_flags;
5015 	if (error)
5016 		log(LOG_ERR, "%s: interface not running\n",
5017 		    device_xname(sc->sc_dev));
5018 	return error;
5019 }
5020 
5021 /*
5022  * wm_stop:		[ifnet interface function]
5023  *
5024  *	Stop transmission on the interface.
5025  */
5026 static void
5027 wm_stop(struct ifnet *ifp, int disable)
5028 {
5029 	struct wm_softc *sc = ifp->if_softc;
5030 
5031 	WM_CORE_LOCK(sc);
5032 	wm_stop_locked(ifp, disable);
5033 	WM_CORE_UNLOCK(sc);
5034 }
5035 
5036 static void
5037 wm_stop_locked(struct ifnet *ifp, int disable)
5038 {
5039 	struct wm_softc *sc = ifp->if_softc;
5040 	struct wm_txsoft *txs;
5041 	int i, qidx;
5042 
5043 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5044 		device_xname(sc->sc_dev), __func__));
5045 	KASSERT(WM_CORE_LOCKED(sc));
5046 
5047 	sc->sc_stopping = true;
5048 
5049 	/* Stop the one second clock. */
5050 	callout_stop(&sc->sc_tick_ch);
5051 
5052 	/* Stop the 82547 Tx FIFO stall check timer. */
5053 	if (sc->sc_type == WM_T_82547)
5054 		callout_stop(&sc->sc_txfifo_ch);
5055 
5056 	if (sc->sc_flags & WM_F_HAS_MII) {
5057 		/* Down the MII. */
5058 		mii_down(&sc->sc_mii);
5059 	} else {
5060 #if 0
5061 		/* Should we clear PHY's status properly? */
5062 		wm_reset(sc);
5063 #endif
5064 	}
5065 
5066 	/* Stop the transmit and receive processes. */
5067 	CSR_WRITE(sc, WMREG_TCTL, 0);
5068 	CSR_WRITE(sc, WMREG_RCTL, 0);
5069 	sc->sc_rctl &= ~RCTL_EN;
5070 
5071 	/*
5072 	 * Clear the interrupt mask to ensure the device cannot assert its
5073 	 * interrupt line.
5074 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5075 	 * service any currently pending or shared interrupt.
5076 	 */
5077 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5078 	sc->sc_icr = 0;
5079 	if (sc->sc_nintrs > 1) {
5080 		if (sc->sc_type != WM_T_82574) {
5081 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5082 			CSR_WRITE(sc, WMREG_EIAC, 0);
5083 		} else
5084 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5085 	}
5086 
5087 	/* Release any queued transmit buffers. */
5088 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5089 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5090 		struct wm_txqueue *txq = &wmq->wmq_txq;
5091 		mutex_enter(txq->txq_lock);
5092 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5093 			txs = &txq->txq_soft[i];
5094 			if (txs->txs_mbuf != NULL) {
5095 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5096 				m_freem(txs->txs_mbuf);
5097 				txs->txs_mbuf = NULL;
5098 			}
5099 		}
5100 		if (sc->sc_type == WM_T_PCH_SPT) {
5101 			pcireg_t preg;
5102 			uint32_t reg;
5103 			int nexttx;
5104 
5105 			/* First, disable MULR fix in FEXTNVM11 */
5106 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
5107 			reg |= FEXTNVM11_DIS_MULRFIX;
5108 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5109 
5110 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5111 			    WM_PCI_DESCRING_STATUS);
5112 			reg = CSR_READ(sc, WMREG_TDLEN(0));
5113 			printf("XXX RST: FLUSH = %08x, len = %u\n",
5114 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5115 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5116 			    && (reg != 0)) {
5117 				/* TX */
5118 				printf("XXX need TX flush (reg = %08x)\n",
5119 				    preg);
5120 				wm_init_tx_descs(sc, txq);
5121 				wm_init_tx_regs(sc, wmq, txq);
5122 				nexttx = txq->txq_next;
5123 				wm_set_dma_addr(
5124 					&txq->txq_descs[nexttx].wtx_addr,
5125 					WM_CDTXADDR(txq, nexttx));
5126 				txq->txq_descs[nexttx].wtx_cmdlen
5127 				    = htole32(WTX_CMD_IFCS | 512);
5128 				wm_cdtxsync(txq, nexttx, 1,
5129 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5130 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5131 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5132 				CSR_WRITE_FLUSH(sc);
5133 				delay(250);
5134 				CSR_WRITE(sc, WMREG_TCTL, 0);
5135 			}
5136 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5137 			    WM_PCI_DESCRING_STATUS);
5138 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
5139 				/* RX */
5140 				printf("XXX need RX flush\n");
5141 			}
5142 		}
5143 		mutex_exit(txq->txq_lock);
5144 	}
5145 
5146 	/* Mark the interface as down and cancel the watchdog timer. */
5147 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5148 	ifp->if_timer = 0;
5149 
5150 	if (disable) {
5151 		for (i = 0; i < sc->sc_nqueues; i++) {
5152 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5153 			mutex_enter(rxq->rxq_lock);
5154 			wm_rxdrain(rxq);
5155 			mutex_exit(rxq->rxq_lock);
5156 		}
5157 	}
5158 
5159 #if 0 /* notyet */
5160 	if (sc->sc_type >= WM_T_82544)
5161 		CSR_WRITE(sc, WMREG_WUC, 0);
5162 #endif
5163 }
5164 
5165 static void
5166 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5167 {
5168 	struct mbuf *m;
5169 	int i;
5170 
5171 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5172 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5173 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5174 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5175 		    m->m_data, m->m_len, m->m_flags);
5176 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5177 	    i, i == 1 ? "" : "s");
5178 }
5179 
5180 /*
5181  * wm_82547_txfifo_stall:
5182  *
5183  *	Callout used to wait for the 82547 Tx FIFO to drain,
5184  *	reset the FIFO pointers, and restart packet transmission.
5185  */
5186 static void
5187 wm_82547_txfifo_stall(void *arg)
5188 {
5189 	struct wm_softc *sc = arg;
5190 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5191 
5192 	mutex_enter(txq->txq_lock);
5193 
5194 	if (sc->sc_stopping)
5195 		goto out;
5196 
5197 	if (txq->txq_fifo_stall) {
5198 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5199 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5200 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5201 			/*
5202 			 * Packets have drained.  Stop transmitter, reset
5203 			 * FIFO pointers, restart transmitter, and kick
5204 			 * the packet queue.
5205 			 */
5206 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5207 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5208 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5209 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5210 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5211 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5212 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5213 			CSR_WRITE_FLUSH(sc);
5214 
5215 			txq->txq_fifo_head = 0;
5216 			txq->txq_fifo_stall = 0;
5217 			wm_start_locked(&sc->sc_ethercom.ec_if);
5218 		} else {
5219 			/*
5220 			 * Still waiting for packets to drain; try again in
5221 			 * another tick.
5222 			 */
5223 			callout_schedule(&sc->sc_txfifo_ch, 1);
5224 		}
5225 	}
5226 
5227 out:
5228 	mutex_exit(txq->txq_lock);
5229 }
5230 
5231 /*
5232  * wm_82547_txfifo_bugchk:
5233  *
5234  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5235  *	prevent enqueueing a packet that would wrap around the end
5236  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
5237  *
5238  *	We do this by checking the amount of space before the end
5239  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5240  *	the Tx FIFO, wait for all remaining packets to drain, reset
5241  *	the internal FIFO pointers to the beginning, and restart
5242  *	transmission on the interface.
5243  */
5244 #define	WM_FIFO_HDR		0x10
5245 #define	WM_82547_PAD_LEN	0x3e0
5246 static int
5247 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5248 {
5249 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5250 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5251 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5252 
5253 	/* Just return if already stalled. */
5254 	if (txq->txq_fifo_stall)
5255 		return 1;
5256 
5257 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5258 		/* Stall only occurs in half-duplex mode. */
5259 		goto send_packet;
5260 	}
5261 
5262 	if (len >= WM_82547_PAD_LEN + space) {
5263 		txq->txq_fifo_stall = 1;
5264 		callout_schedule(&sc->sc_txfifo_ch, 1);
5265 		return 1;
5266 	}
5267 
5268  send_packet:
5269 	txq->txq_fifo_head += len;
5270 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5271 		txq->txq_fifo_head -= txq->txq_fifo_size;
5272 
5273 	return 0;
5274 }
5275 
5276 static int
5277 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5278 {
5279 	int error;
5280 
5281 	/*
5282 	 * Allocate the control data structures, and create and load the
5283 	 * DMA map for it.
5284 	 *
5285 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5286 	 * memory.  So must Rx descriptors.  We simplify by allocating
5287 	 * both sets within the same 4G segment.
5288 	 */
5289 	if (sc->sc_type < WM_T_82544)
5290 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5291 	else
5292 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5293 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5294 		txq->txq_descsize = sizeof(nq_txdesc_t);
5295 	else
5296 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
5297 
5298 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5299 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5300 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5301 		aprint_error_dev(sc->sc_dev,
5302 		    "unable to allocate TX control data, error = %d\n",
5303 		    error);
5304 		goto fail_0;
5305 	}
5306 
5307 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5308 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5309 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5310 		aprint_error_dev(sc->sc_dev,
5311 		    "unable to map TX control data, error = %d\n", error);
5312 		goto fail_1;
5313 	}
5314 
5315 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5316 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5317 		aprint_error_dev(sc->sc_dev,
5318 		    "unable to create TX control data DMA map, error = %d\n",
5319 		    error);
5320 		goto fail_2;
5321 	}
5322 
5323 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5324 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5325 		aprint_error_dev(sc->sc_dev,
5326 		    "unable to load TX control data DMA map, error = %d\n",
5327 		    error);
5328 		goto fail_3;
5329 	}
5330 
5331 	return 0;
5332 
5333  fail_3:
5334 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5335  fail_2:
5336 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5337 	    WM_TXDESCS_SIZE(txq));
5338  fail_1:
5339 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5340  fail_0:
5341 	return error;
5342 }
5343 
5344 static void
5345 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5346 {
5347 
5348 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5349 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5350 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5351 	    WM_TXDESCS_SIZE(txq));
5352 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5353 }
5354 
5355 static int
5356 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5357 {
5358 	int error;
5359 
5360 	/*
5361 	 * Allocate the control data structures, and create and load the
5362 	 * DMA map for it.
5363 	 *
5364 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5365 	 * memory.  So must Rx descriptors.  We simplify by allocating
5366 	 * both sets within the same 4G segment.
5367 	 */
5368 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5369 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5370 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5371 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5372 		aprint_error_dev(sc->sc_dev,
5373 		    "unable to allocate RX control data, error = %d\n",
5374 		    error);
5375 		goto fail_0;
5376 	}
5377 
5378 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5379 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5380 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5381 		aprint_error_dev(sc->sc_dev,
5382 		    "unable to map RX control data, error = %d\n", error);
5383 		goto fail_1;
5384 	}
5385 
5386 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5387 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5388 		aprint_error_dev(sc->sc_dev,
5389 		    "unable to create RX control data DMA map, error = %d\n",
5390 		    error);
5391 		goto fail_2;
5392 	}
5393 
5394 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5395 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5396 		aprint_error_dev(sc->sc_dev,
5397 		    "unable to load RX control data DMA map, error = %d\n",
5398 		    error);
5399 		goto fail_3;
5400 	}
5401 
5402 	return 0;
5403 
5404  fail_3:
5405 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5406  fail_2:
5407 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5408 	    rxq->rxq_desc_size);
5409  fail_1:
5410 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5411  fail_0:
5412 	return error;
5413 }
5414 
5415 static void
5416 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5417 {
5418 
5419 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5420 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5421 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5422 	    rxq->rxq_desc_size);
5423 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5424 }
5425 
5426 
5427 static int
5428 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5429 {
5430 	int i, error;
5431 
5432 	/* Create the transmit buffer DMA maps. */
5433 	WM_TXQUEUELEN(txq) =
5434 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5435 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5436 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5437 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5438 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5439 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5440 			aprint_error_dev(sc->sc_dev,
5441 			    "unable to create Tx DMA map %d, error = %d\n",
5442 			    i, error);
5443 			goto fail;
5444 		}
5445 	}
5446 
5447 	return 0;
5448 
5449  fail:
5450 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5451 		if (txq->txq_soft[i].txs_dmamap != NULL)
5452 			bus_dmamap_destroy(sc->sc_dmat,
5453 			    txq->txq_soft[i].txs_dmamap);
5454 	}
5455 	return error;
5456 }
5457 
5458 static void
5459 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5460 {
5461 	int i;
5462 
5463 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5464 		if (txq->txq_soft[i].txs_dmamap != NULL)
5465 			bus_dmamap_destroy(sc->sc_dmat,
5466 			    txq->txq_soft[i].txs_dmamap);
5467 	}
5468 }
5469 
5470 static int
5471 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5472 {
5473 	int i, error;
5474 
5475 	/* Create the receive buffer DMA maps. */
5476 	for (i = 0; i < WM_NRXDESC; i++) {
5477 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5478 			    MCLBYTES, 0, 0,
5479 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5480 			aprint_error_dev(sc->sc_dev,
5481 			    "unable to create Rx DMA map %d error = %d\n",
5482 			    i, error);
5483 			goto fail;
5484 		}
5485 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5486 	}
5487 
5488 	return 0;
5489 
5490  fail:
5491 	for (i = 0; i < WM_NRXDESC; i++) {
5492 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5493 			bus_dmamap_destroy(sc->sc_dmat,
5494 			    rxq->rxq_soft[i].rxs_dmamap);
5495 	}
5496 	return error;
5497 }
5498 
5499 static void
5500 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5501 {
5502 	int i;
5503 
5504 	for (i = 0; i < WM_NRXDESC; i++) {
5505 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5506 			bus_dmamap_destroy(sc->sc_dmat,
5507 			    rxq->rxq_soft[i].rxs_dmamap);
5508 	}
5509 }
5510 
5511 /*
5512  * wm_alloc_quques:
5513  *	Allocate {tx,rx}descs and {tx,rx} buffers
5514  */
5515 static int
5516 wm_alloc_txrx_queues(struct wm_softc *sc)
5517 {
5518 	int i, error, tx_done, rx_done;
5519 
5520 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5521 	    KM_SLEEP);
5522 	if (sc->sc_queue == NULL) {
5523 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
5524 		error = ENOMEM;
5525 		goto fail_0;
5526 	}
5527 
5528 	/*
5529 	 * For transmission
5530 	 */
5531 	error = 0;
5532 	tx_done = 0;
5533 	for (i = 0; i < sc->sc_nqueues; i++) {
5534 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5535 		txq->txq_sc = sc;
5536 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5537 
5538 		error = wm_alloc_tx_descs(sc, txq);
5539 		if (error)
5540 			break;
5541 		error = wm_alloc_tx_buffer(sc, txq);
5542 		if (error) {
5543 			wm_free_tx_descs(sc, txq);
5544 			break;
5545 		}
5546 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5547 		if (txq->txq_interq == NULL) {
5548 			wm_free_tx_descs(sc, txq);
5549 			wm_free_tx_buffer(sc, txq);
5550 			error = ENOMEM;
5551 			break;
5552 		}
5553 		tx_done++;
5554 	}
5555 	if (error)
5556 		goto fail_1;
5557 
5558 	/*
5559 	 * For recieve
5560 	 */
5561 	error = 0;
5562 	rx_done = 0;
5563 	for (i = 0; i < sc->sc_nqueues; i++) {
5564 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5565 		rxq->rxq_sc = sc;
5566 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5567 
5568 		error = wm_alloc_rx_descs(sc, rxq);
5569 		if (error)
5570 			break;
5571 
5572 		error = wm_alloc_rx_buffer(sc, rxq);
5573 		if (error) {
5574 			wm_free_rx_descs(sc, rxq);
5575 			break;
5576 		}
5577 
5578 		rx_done++;
5579 	}
5580 	if (error)
5581 		goto fail_2;
5582 
5583 	return 0;
5584 
5585  fail_2:
5586 	for (i = 0; i < rx_done; i++) {
5587 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5588 		wm_free_rx_buffer(sc, rxq);
5589 		wm_free_rx_descs(sc, rxq);
5590 		if (rxq->rxq_lock)
5591 			mutex_obj_free(rxq->rxq_lock);
5592 	}
5593  fail_1:
5594 	for (i = 0; i < tx_done; i++) {
5595 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5596 		pcq_destroy(txq->txq_interq);
5597 		wm_free_tx_buffer(sc, txq);
5598 		wm_free_tx_descs(sc, txq);
5599 		if (txq->txq_lock)
5600 			mutex_obj_free(txq->txq_lock);
5601 	}
5602 
5603 	kmem_free(sc->sc_queue,
5604 	    sizeof(struct wm_queue) * sc->sc_nqueues);
5605  fail_0:
5606 	return error;
5607 }
5608 
5609 /*
5610  * wm_free_quques:
5611  *	Free {tx,rx}descs and {tx,rx} buffers
5612  */
5613 static void
5614 wm_free_txrx_queues(struct wm_softc *sc)
5615 {
5616 	int i;
5617 
5618 	for (i = 0; i < sc->sc_nqueues; i++) {
5619 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5620 		wm_free_rx_buffer(sc, rxq);
5621 		wm_free_rx_descs(sc, rxq);
5622 		if (rxq->rxq_lock)
5623 			mutex_obj_free(rxq->rxq_lock);
5624 	}
5625 
5626 	for (i = 0; i < sc->sc_nqueues; i++) {
5627 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5628 		wm_free_tx_buffer(sc, txq);
5629 		wm_free_tx_descs(sc, txq);
5630 		if (txq->txq_lock)
5631 			mutex_obj_free(txq->txq_lock);
5632 	}
5633 
5634 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5635 }
5636 
5637 static void
5638 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5639 {
5640 
5641 	KASSERT(mutex_owned(txq->txq_lock));
5642 
5643 	/* Initialize the transmit descriptor ring. */
5644 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5645 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5646 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5647 	txq->txq_free = WM_NTXDESC(txq);
5648 	txq->txq_next = 0;
5649 }
5650 
5651 static void
5652 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5653     struct wm_txqueue *txq)
5654 {
5655 
5656 	KASSERT(mutex_owned(txq->txq_lock));
5657 
5658 	if (sc->sc_type < WM_T_82543) {
5659 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5660 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5661 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5662 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5663 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5664 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5665 	} else {
5666 		int qid = wmq->wmq_id;
5667 
5668 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5669 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5670 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5671 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5672 
5673 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5674 			/*
5675 			 * Don't write TDT before TCTL.EN is set.
5676 			 * See the document.
5677 			 */
5678 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5679 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5680 			    | TXDCTL_WTHRESH(0));
5681 		else {
5682 			/* ITR / 4 */
5683 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5684 			if (sc->sc_type >= WM_T_82540) {
5685 				/* should be same */
5686 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5687 			}
5688 
5689 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5690 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5691 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5692 		}
5693 	}
5694 }
5695 
5696 static void
5697 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5698 {
5699 	int i;
5700 
5701 	KASSERT(mutex_owned(txq->txq_lock));
5702 
5703 	/* Initialize the transmit job descriptors. */
5704 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5705 		txq->txq_soft[i].txs_mbuf = NULL;
5706 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5707 	txq->txq_snext = 0;
5708 	txq->txq_sdirty = 0;
5709 }
5710 
5711 static void
5712 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5713     struct wm_txqueue *txq)
5714 {
5715 
5716 	KASSERT(mutex_owned(txq->txq_lock));
5717 
5718 	/*
5719 	 * Set up some register offsets that are different between
5720 	 * the i82542 and the i82543 and later chips.
5721 	 */
5722 	if (sc->sc_type < WM_T_82543)
5723 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5724 	else
5725 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5726 
5727 	wm_init_tx_descs(sc, txq);
5728 	wm_init_tx_regs(sc, wmq, txq);
5729 	wm_init_tx_buffer(sc, txq);
5730 }
5731 
5732 static void
5733 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5734     struct wm_rxqueue *rxq)
5735 {
5736 
5737 	KASSERT(mutex_owned(rxq->rxq_lock));
5738 
5739 	/*
5740 	 * Initialize the receive descriptor and receive job
5741 	 * descriptor rings.
5742 	 */
5743 	if (sc->sc_type < WM_T_82543) {
5744 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5745 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5746 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5747 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5748 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5749 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5750 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5751 
5752 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5753 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5754 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5755 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5756 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5757 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5758 	} else {
5759 		int qid = wmq->wmq_id;
5760 
5761 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5762 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5763 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5764 
5765 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5766 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5767 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
5768 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5769 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5770 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5771 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5772 			    | RXDCTL_WTHRESH(1));
5773 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5774 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5775 		} else {
5776 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5777 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5778 			/* ITR / 4 */
5779 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5780 			/* MUST be same */
5781 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5782 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5783 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5784 		}
5785 	}
5786 }
5787 
5788 static int
5789 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5790 {
5791 	struct wm_rxsoft *rxs;
5792 	int error, i;
5793 
5794 	KASSERT(mutex_owned(rxq->rxq_lock));
5795 
5796 	for (i = 0; i < WM_NRXDESC; i++) {
5797 		rxs = &rxq->rxq_soft[i];
5798 		if (rxs->rxs_mbuf == NULL) {
5799 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5800 				log(LOG_ERR, "%s: unable to allocate or map "
5801 				    "rx buffer %d, error = %d\n",
5802 				    device_xname(sc->sc_dev), i, error);
5803 				/*
5804 				 * XXX Should attempt to run with fewer receive
5805 				 * XXX buffers instead of just failing.
5806 				 */
5807 				wm_rxdrain(rxq);
5808 				return ENOMEM;
5809 			}
5810 		} else {
5811 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5812 				wm_init_rxdesc(rxq, i);
5813 			/*
5814 			 * For 82575 and newer device, the RX descriptors
5815 			 * must be initialized after the setting of RCTL.EN in
5816 			 * wm_set_filter()
5817 			 */
5818 		}
5819 	}
5820 	rxq->rxq_ptr = 0;
5821 	rxq->rxq_discard = 0;
5822 	WM_RXCHAIN_RESET(rxq);
5823 
5824 	return 0;
5825 }
5826 
5827 static int
5828 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5829     struct wm_rxqueue *rxq)
5830 {
5831 
5832 	KASSERT(mutex_owned(rxq->rxq_lock));
5833 
5834 	/*
5835 	 * Set up some register offsets that are different between
5836 	 * the i82542 and the i82543 and later chips.
5837 	 */
5838 	if (sc->sc_type < WM_T_82543)
5839 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5840 	else
5841 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
5842 
5843 	wm_init_rx_regs(sc, wmq, rxq);
5844 	return wm_init_rx_buffer(sc, rxq);
5845 }
5846 
5847 /*
5848  * wm_init_quques:
5849  *	Initialize {tx,rx}descs and {tx,rx} buffers
5850  */
5851 static int
5852 wm_init_txrx_queues(struct wm_softc *sc)
5853 {
5854 	int i, error = 0;
5855 
5856 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5857 		device_xname(sc->sc_dev), __func__));
5858 	for (i = 0; i < sc->sc_nqueues; i++) {
5859 		struct wm_queue *wmq = &sc->sc_queue[i];
5860 		struct wm_txqueue *txq = &wmq->wmq_txq;
5861 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5862 
5863 		mutex_enter(txq->txq_lock);
5864 		wm_init_tx_queue(sc, wmq, txq);
5865 		mutex_exit(txq->txq_lock);
5866 
5867 		mutex_enter(rxq->rxq_lock);
5868 		error = wm_init_rx_queue(sc, wmq, rxq);
5869 		mutex_exit(rxq->rxq_lock);
5870 		if (error)
5871 			break;
5872 	}
5873 
5874 	return error;
5875 }
5876 
5877 /*
5878  * wm_tx_offload:
5879  *
5880  *	Set up TCP/IP checksumming parameters for the
5881  *	specified packet.
5882  */
5883 static int
5884 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
5885     uint8_t *fieldsp)
5886 {
5887 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5888 	struct mbuf *m0 = txs->txs_mbuf;
5889 	struct livengood_tcpip_ctxdesc *t;
5890 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
5891 	uint32_t ipcse;
5892 	struct ether_header *eh;
5893 	int offset, iphl;
5894 	uint8_t fields;
5895 
5896 	/*
5897 	 * XXX It would be nice if the mbuf pkthdr had offset
5898 	 * fields for the protocol headers.
5899 	 */
5900 
5901 	eh = mtod(m0, struct ether_header *);
5902 	switch (htons(eh->ether_type)) {
5903 	case ETHERTYPE_IP:
5904 	case ETHERTYPE_IPV6:
5905 		offset = ETHER_HDR_LEN;
5906 		break;
5907 
5908 	case ETHERTYPE_VLAN:
5909 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
5910 		break;
5911 
5912 	default:
5913 		/*
5914 		 * Don't support this protocol or encapsulation.
5915 		 */
5916 		*fieldsp = 0;
5917 		*cmdp = 0;
5918 		return 0;
5919 	}
5920 
5921 	if ((m0->m_pkthdr.csum_flags &
5922 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
5923 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
5924 	} else {
5925 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
5926 	}
5927 	ipcse = offset + iphl - 1;
5928 
5929 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
5930 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
5931 	seg = 0;
5932 	fields = 0;
5933 
5934 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
5935 		int hlen = offset + iphl;
5936 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
5937 
5938 		if (__predict_false(m0->m_len <
5939 				    (hlen + sizeof(struct tcphdr)))) {
5940 			/*
5941 			 * TCP/IP headers are not in the first mbuf; we need
5942 			 * to do this the slow and painful way.  Let's just
5943 			 * hope this doesn't happen very often.
5944 			 */
5945 			struct tcphdr th;
5946 
5947 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
5948 
5949 			m_copydata(m0, hlen, sizeof(th), &th);
5950 			if (v4) {
5951 				struct ip ip;
5952 
5953 				m_copydata(m0, offset, sizeof(ip), &ip);
5954 				ip.ip_len = 0;
5955 				m_copyback(m0,
5956 				    offset + offsetof(struct ip, ip_len),
5957 				    sizeof(ip.ip_len), &ip.ip_len);
5958 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
5959 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
5960 			} else {
5961 				struct ip6_hdr ip6;
5962 
5963 				m_copydata(m0, offset, sizeof(ip6), &ip6);
5964 				ip6.ip6_plen = 0;
5965 				m_copyback(m0,
5966 				    offset + offsetof(struct ip6_hdr, ip6_plen),
5967 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
5968 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
5969 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
5970 			}
5971 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
5972 			    sizeof(th.th_sum), &th.th_sum);
5973 
5974 			hlen += th.th_off << 2;
5975 		} else {
5976 			/*
5977 			 * TCP/IP headers are in the first mbuf; we can do
5978 			 * this the easy way.
5979 			 */
5980 			struct tcphdr *th;
5981 
5982 			if (v4) {
5983 				struct ip *ip =
5984 				    (void *)(mtod(m0, char *) + offset);
5985 				th = (void *)(mtod(m0, char *) + hlen);
5986 
5987 				ip->ip_len = 0;
5988 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
5989 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
5990 			} else {
5991 				struct ip6_hdr *ip6 =
5992 				    (void *)(mtod(m0, char *) + offset);
5993 				th = (void *)(mtod(m0, char *) + hlen);
5994 
5995 				ip6->ip6_plen = 0;
5996 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
5997 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
5998 			}
5999 			hlen += th->th_off << 2;
6000 		}
6001 
6002 		if (v4) {
6003 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
6004 			cmdlen |= WTX_TCPIP_CMD_IP;
6005 		} else {
6006 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6007 			ipcse = 0;
6008 		}
6009 		cmd |= WTX_TCPIP_CMD_TSE;
6010 		cmdlen |= WTX_TCPIP_CMD_TSE |
6011 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6012 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6013 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6014 	}
6015 
6016 	/*
6017 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6018 	 * offload feature, if we load the context descriptor, we
6019 	 * MUST provide valid values for IPCSS and TUCSS fields.
6020 	 */
6021 
6022 	ipcs = WTX_TCPIP_IPCSS(offset) |
6023 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6024 	    WTX_TCPIP_IPCSE(ipcse);
6025 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6026 		WM_EVCNT_INCR(&sc->sc_ev_txipsum);
6027 		fields |= WTX_IXSM;
6028 	}
6029 
6030 	offset += iphl;
6031 
6032 	if (m0->m_pkthdr.csum_flags &
6033 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6034 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6035 		fields |= WTX_TXSM;
6036 		tucs = WTX_TCPIP_TUCSS(offset) |
6037 		    WTX_TCPIP_TUCSO(offset +
6038 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6039 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6040 	} else if ((m0->m_pkthdr.csum_flags &
6041 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6042 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6043 		fields |= WTX_TXSM;
6044 		tucs = WTX_TCPIP_TUCSS(offset) |
6045 		    WTX_TCPIP_TUCSO(offset +
6046 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6047 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6048 	} else {
6049 		/* Just initialize it to a valid TCP context. */
6050 		tucs = WTX_TCPIP_TUCSS(offset) |
6051 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6052 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6053 	}
6054 
6055 	/* Fill in the context descriptor. */
6056 	t = (struct livengood_tcpip_ctxdesc *)
6057 	    &txq->txq_descs[txq->txq_next];
6058 	t->tcpip_ipcs = htole32(ipcs);
6059 	t->tcpip_tucs = htole32(tucs);
6060 	t->tcpip_cmdlen = htole32(cmdlen);
6061 	t->tcpip_seg = htole32(seg);
6062 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6063 
6064 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6065 	txs->txs_ndesc++;
6066 
6067 	*cmdp = cmd;
6068 	*fieldsp = fields;
6069 
6070 	return 0;
6071 }
6072 
6073 /*
6074  * wm_start:		[ifnet interface function]
6075  *
6076  *	Start packet transmission on the interface.
6077  */
6078 static void
6079 wm_start(struct ifnet *ifp)
6080 {
6081 	struct wm_softc *sc = ifp->if_softc;
6082 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6083 
6084 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6085 
6086 	mutex_enter(txq->txq_lock);
6087 	if (!sc->sc_stopping)
6088 		wm_start_locked(ifp);
6089 	mutex_exit(txq->txq_lock);
6090 }
6091 
6092 static void
6093 wm_start_locked(struct ifnet *ifp)
6094 {
6095 	struct wm_softc *sc = ifp->if_softc;
6096 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6097 	struct mbuf *m0;
6098 	struct m_tag *mtag;
6099 	struct wm_txsoft *txs;
6100 	bus_dmamap_t dmamap;
6101 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6102 	bus_addr_t curaddr;
6103 	bus_size_t seglen, curlen;
6104 	uint32_t cksumcmd;
6105 	uint8_t cksumfields;
6106 
6107 	KASSERT(mutex_owned(txq->txq_lock));
6108 
6109 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6110 		return;
6111 
6112 	/* Remember the previous number of free descriptors. */
6113 	ofree = txq->txq_free;
6114 
6115 	/*
6116 	 * Loop through the send queue, setting up transmit descriptors
6117 	 * until we drain the queue, or use up all available transmit
6118 	 * descriptors.
6119 	 */
6120 	for (;;) {
6121 		m0 = NULL;
6122 
6123 		/* Get a work queue entry. */
6124 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6125 			wm_txeof(sc, txq);
6126 			if (txq->txq_sfree == 0) {
6127 				DPRINTF(WM_DEBUG_TX,
6128 				    ("%s: TX: no free job descriptors\n",
6129 					device_xname(sc->sc_dev)));
6130 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6131 				break;
6132 			}
6133 		}
6134 
6135 		/* Grab a packet off the queue. */
6136 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6137 		if (m0 == NULL)
6138 			break;
6139 
6140 		DPRINTF(WM_DEBUG_TX,
6141 		    ("%s: TX: have packet to transmit: %p\n",
6142 		    device_xname(sc->sc_dev), m0));
6143 
6144 		txs = &txq->txq_soft[txq->txq_snext];
6145 		dmamap = txs->txs_dmamap;
6146 
6147 		use_tso = (m0->m_pkthdr.csum_flags &
6148 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6149 
6150 		/*
6151 		 * So says the Linux driver:
6152 		 * The controller does a simple calculation to make sure
6153 		 * there is enough room in the FIFO before initiating the
6154 		 * DMA for each buffer.  The calc is:
6155 		 *	4 = ceil(buffer len / MSS)
6156 		 * To make sure we don't overrun the FIFO, adjust the max
6157 		 * buffer len if the MSS drops.
6158 		 */
6159 		dmamap->dm_maxsegsz =
6160 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6161 		    ? m0->m_pkthdr.segsz << 2
6162 		    : WTX_MAX_LEN;
6163 
6164 		/*
6165 		 * Load the DMA map.  If this fails, the packet either
6166 		 * didn't fit in the allotted number of segments, or we
6167 		 * were short on resources.  For the too-many-segments
6168 		 * case, we simply report an error and drop the packet,
6169 		 * since we can't sanely copy a jumbo packet to a single
6170 		 * buffer.
6171 		 */
6172 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6173 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6174 		if (error) {
6175 			if (error == EFBIG) {
6176 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6177 				log(LOG_ERR, "%s: Tx packet consumes too many "
6178 				    "DMA segments, dropping...\n",
6179 				    device_xname(sc->sc_dev));
6180 				wm_dump_mbuf_chain(sc, m0);
6181 				m_freem(m0);
6182 				continue;
6183 			}
6184 			/*  Short on resources, just stop for now. */
6185 			DPRINTF(WM_DEBUG_TX,
6186 			    ("%s: TX: dmamap load failed: %d\n",
6187 			    device_xname(sc->sc_dev), error));
6188 			break;
6189 		}
6190 
6191 		segs_needed = dmamap->dm_nsegs;
6192 		if (use_tso) {
6193 			/* For sentinel descriptor; see below. */
6194 			segs_needed++;
6195 		}
6196 
6197 		/*
6198 		 * Ensure we have enough descriptors free to describe
6199 		 * the packet.  Note, we always reserve one descriptor
6200 		 * at the end of the ring due to the semantics of the
6201 		 * TDT register, plus one more in the event we need
6202 		 * to load offload context.
6203 		 */
6204 		if (segs_needed > txq->txq_free - 2) {
6205 			/*
6206 			 * Not enough free descriptors to transmit this
6207 			 * packet.  We haven't committed anything yet,
6208 			 * so just unload the DMA map, put the packet
6209 			 * pack on the queue, and punt.  Notify the upper
6210 			 * layer that there are no more slots left.
6211 			 */
6212 			DPRINTF(WM_DEBUG_TX,
6213 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6214 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6215 			    segs_needed, txq->txq_free - 1));
6216 			ifp->if_flags |= IFF_OACTIVE;
6217 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6218 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6219 			break;
6220 		}
6221 
6222 		/*
6223 		 * Check for 82547 Tx FIFO bug.  We need to do this
6224 		 * once we know we can transmit the packet, since we
6225 		 * do some internal FIFO space accounting here.
6226 		 */
6227 		if (sc->sc_type == WM_T_82547 &&
6228 		    wm_82547_txfifo_bugchk(sc, m0)) {
6229 			DPRINTF(WM_DEBUG_TX,
6230 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6231 			    device_xname(sc->sc_dev)));
6232 			ifp->if_flags |= IFF_OACTIVE;
6233 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6234 			WM_EVCNT_INCR(&sc->sc_ev_txfifo_stall);
6235 			break;
6236 		}
6237 
6238 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6239 
6240 		DPRINTF(WM_DEBUG_TX,
6241 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6242 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6243 
6244 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6245 
6246 		/*
6247 		 * Store a pointer to the packet so that we can free it
6248 		 * later.
6249 		 *
6250 		 * Initially, we consider the number of descriptors the
6251 		 * packet uses the number of DMA segments.  This may be
6252 		 * incremented by 1 if we do checksum offload (a descriptor
6253 		 * is used to set the checksum context).
6254 		 */
6255 		txs->txs_mbuf = m0;
6256 		txs->txs_firstdesc = txq->txq_next;
6257 		txs->txs_ndesc = segs_needed;
6258 
6259 		/* Set up offload parameters for this packet. */
6260 		if (m0->m_pkthdr.csum_flags &
6261 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6262 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6263 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6264 			if (wm_tx_offload(sc, txs, &cksumcmd,
6265 					  &cksumfields) != 0) {
6266 				/* Error message already displayed. */
6267 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6268 				continue;
6269 			}
6270 		} else {
6271 			cksumcmd = 0;
6272 			cksumfields = 0;
6273 		}
6274 
6275 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6276 
6277 		/* Sync the DMA map. */
6278 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6279 		    BUS_DMASYNC_PREWRITE);
6280 
6281 		/* Initialize the transmit descriptor. */
6282 		for (nexttx = txq->txq_next, seg = 0;
6283 		     seg < dmamap->dm_nsegs; seg++) {
6284 			for (seglen = dmamap->dm_segs[seg].ds_len,
6285 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6286 			     seglen != 0;
6287 			     curaddr += curlen, seglen -= curlen,
6288 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6289 				curlen = seglen;
6290 
6291 				/*
6292 				 * So says the Linux driver:
6293 				 * Work around for premature descriptor
6294 				 * write-backs in TSO mode.  Append a
6295 				 * 4-byte sentinel descriptor.
6296 				 */
6297 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6298 				    curlen > 8)
6299 					curlen -= 4;
6300 
6301 				wm_set_dma_addr(
6302 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6303 				txq->txq_descs[nexttx].wtx_cmdlen
6304 				    = htole32(cksumcmd | curlen);
6305 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6306 				    = 0;
6307 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6308 				    = cksumfields;
6309 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6310 				lasttx = nexttx;
6311 
6312 				DPRINTF(WM_DEBUG_TX,
6313 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6314 				     "len %#04zx\n",
6315 				    device_xname(sc->sc_dev), nexttx,
6316 				    (uint64_t)curaddr, curlen));
6317 			}
6318 		}
6319 
6320 		KASSERT(lasttx != -1);
6321 
6322 		/*
6323 		 * Set up the command byte on the last descriptor of
6324 		 * the packet.  If we're in the interrupt delay window,
6325 		 * delay the interrupt.
6326 		 */
6327 		txq->txq_descs[lasttx].wtx_cmdlen |=
6328 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6329 
6330 		/*
6331 		 * If VLANs are enabled and the packet has a VLAN tag, set
6332 		 * up the descriptor to encapsulate the packet for us.
6333 		 *
6334 		 * This is only valid on the last descriptor of the packet.
6335 		 */
6336 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6337 			txq->txq_descs[lasttx].wtx_cmdlen |=
6338 			    htole32(WTX_CMD_VLE);
6339 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6340 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6341 		}
6342 
6343 		txs->txs_lastdesc = lasttx;
6344 
6345 		DPRINTF(WM_DEBUG_TX,
6346 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6347 		    device_xname(sc->sc_dev),
6348 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6349 
6350 		/* Sync the descriptors we're using. */
6351 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6352 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6353 
6354 		/* Give the packet to the chip. */
6355 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6356 
6357 		DPRINTF(WM_DEBUG_TX,
6358 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6359 
6360 		DPRINTF(WM_DEBUG_TX,
6361 		    ("%s: TX: finished transmitting packet, job %d\n",
6362 		    device_xname(sc->sc_dev), txq->txq_snext));
6363 
6364 		/* Advance the tx pointer. */
6365 		txq->txq_free -= txs->txs_ndesc;
6366 		txq->txq_next = nexttx;
6367 
6368 		txq->txq_sfree--;
6369 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6370 
6371 		/* Pass the packet to any BPF listeners. */
6372 		bpf_mtap(ifp, m0);
6373 	}
6374 
6375 	if (m0 != NULL) {
6376 		ifp->if_flags |= IFF_OACTIVE;
6377 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6378 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6379 			__func__));
6380 		m_freem(m0);
6381 	}
6382 
6383 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6384 		/* No more slots; notify upper layer. */
6385 		ifp->if_flags |= IFF_OACTIVE;
6386 	}
6387 
6388 	if (txq->txq_free != ofree) {
6389 		/* Set a watchdog timer in case the chip flakes out. */
6390 		ifp->if_timer = 5;
6391 	}
6392 }
6393 
6394 /*
6395  * wm_nq_tx_offload:
6396  *
6397  *	Set up TCP/IP checksumming parameters for the
6398  *	specified packet, for NEWQUEUE devices
6399  */
6400 static int
6401 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6402     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6403 {
6404 	struct mbuf *m0 = txs->txs_mbuf;
6405 	struct m_tag *mtag;
6406 	uint32_t vl_len, mssidx, cmdc;
6407 	struct ether_header *eh;
6408 	int offset, iphl;
6409 
6410 	/*
6411 	 * XXX It would be nice if the mbuf pkthdr had offset
6412 	 * fields for the protocol headers.
6413 	 */
6414 	*cmdlenp = 0;
6415 	*fieldsp = 0;
6416 
6417 	eh = mtod(m0, struct ether_header *);
6418 	switch (htons(eh->ether_type)) {
6419 	case ETHERTYPE_IP:
6420 	case ETHERTYPE_IPV6:
6421 		offset = ETHER_HDR_LEN;
6422 		break;
6423 
6424 	case ETHERTYPE_VLAN:
6425 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6426 		break;
6427 
6428 	default:
6429 		/* Don't support this protocol or encapsulation. */
6430 		*do_csum = false;
6431 		return 0;
6432 	}
6433 	*do_csum = true;
6434 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6435 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6436 
6437 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6438 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6439 
6440 	if ((m0->m_pkthdr.csum_flags &
6441 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6442 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6443 	} else {
6444 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6445 	}
6446 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6447 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6448 
6449 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6450 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6451 		     << NQTXC_VLLEN_VLAN_SHIFT);
6452 		*cmdlenp |= NQTX_CMD_VLE;
6453 	}
6454 
6455 	mssidx = 0;
6456 
6457 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6458 		int hlen = offset + iphl;
6459 		int tcp_hlen;
6460 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6461 
6462 		if (__predict_false(m0->m_len <
6463 				    (hlen + sizeof(struct tcphdr)))) {
6464 			/*
6465 			 * TCP/IP headers are not in the first mbuf; we need
6466 			 * to do this the slow and painful way.  Let's just
6467 			 * hope this doesn't happen very often.
6468 			 */
6469 			struct tcphdr th;
6470 
6471 			WM_EVCNT_INCR(&sc->sc_ev_txtsopain);
6472 
6473 			m_copydata(m0, hlen, sizeof(th), &th);
6474 			if (v4) {
6475 				struct ip ip;
6476 
6477 				m_copydata(m0, offset, sizeof(ip), &ip);
6478 				ip.ip_len = 0;
6479 				m_copyback(m0,
6480 				    offset + offsetof(struct ip, ip_len),
6481 				    sizeof(ip.ip_len), &ip.ip_len);
6482 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6483 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6484 			} else {
6485 				struct ip6_hdr ip6;
6486 
6487 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6488 				ip6.ip6_plen = 0;
6489 				m_copyback(m0,
6490 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6491 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6492 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6493 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6494 			}
6495 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6496 			    sizeof(th.th_sum), &th.th_sum);
6497 
6498 			tcp_hlen = th.th_off << 2;
6499 		} else {
6500 			/*
6501 			 * TCP/IP headers are in the first mbuf; we can do
6502 			 * this the easy way.
6503 			 */
6504 			struct tcphdr *th;
6505 
6506 			if (v4) {
6507 				struct ip *ip =
6508 				    (void *)(mtod(m0, char *) + offset);
6509 				th = (void *)(mtod(m0, char *) + hlen);
6510 
6511 				ip->ip_len = 0;
6512 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6513 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6514 			} else {
6515 				struct ip6_hdr *ip6 =
6516 				    (void *)(mtod(m0, char *) + offset);
6517 				th = (void *)(mtod(m0, char *) + hlen);
6518 
6519 				ip6->ip6_plen = 0;
6520 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6521 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6522 			}
6523 			tcp_hlen = th->th_off << 2;
6524 		}
6525 		hlen += tcp_hlen;
6526 		*cmdlenp |= NQTX_CMD_TSE;
6527 
6528 		if (v4) {
6529 			WM_EVCNT_INCR(&sc->sc_ev_txtso);
6530 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6531 		} else {
6532 			WM_EVCNT_INCR(&sc->sc_ev_txtso6);
6533 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6534 		}
6535 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6536 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6537 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6538 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6539 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6540 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6541 	} else {
6542 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6543 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6544 	}
6545 
6546 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6547 		*fieldsp |= NQTXD_FIELDS_IXSM;
6548 		cmdc |= NQTXC_CMD_IP4;
6549 	}
6550 
6551 	if (m0->m_pkthdr.csum_flags &
6552 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6553 		WM_EVCNT_INCR(&sc->sc_ev_txtusum);
6554 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6555 			cmdc |= NQTXC_CMD_TCP;
6556 		} else {
6557 			cmdc |= NQTXC_CMD_UDP;
6558 		}
6559 		cmdc |= NQTXC_CMD_IP4;
6560 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6561 	}
6562 	if (m0->m_pkthdr.csum_flags &
6563 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6564 		WM_EVCNT_INCR(&sc->sc_ev_txtusum6);
6565 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6566 			cmdc |= NQTXC_CMD_TCP;
6567 		} else {
6568 			cmdc |= NQTXC_CMD_UDP;
6569 		}
6570 		cmdc |= NQTXC_CMD_IP6;
6571 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6572 	}
6573 
6574 	/* Fill in the context descriptor. */
6575 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6576 	    htole32(vl_len);
6577 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6578 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6579 	    htole32(cmdc);
6580 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6581 	    htole32(mssidx);
6582 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6583 	DPRINTF(WM_DEBUG_TX,
6584 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6585 	    txq->txq_next, 0, vl_len));
6586 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6587 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6588 	txs->txs_ndesc++;
6589 	return 0;
6590 }
6591 
6592 /*
6593  * wm_nq_start:		[ifnet interface function]
6594  *
6595  *	Start packet transmission on the interface for NEWQUEUE devices
6596  */
6597 static void
6598 wm_nq_start(struct ifnet *ifp)
6599 {
6600 	struct wm_softc *sc = ifp->if_softc;
6601 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6602 
6603 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6604 
6605 	mutex_enter(txq->txq_lock);
6606 	if (!sc->sc_stopping)
6607 		wm_nq_start_locked(ifp);
6608 	mutex_exit(txq->txq_lock);
6609 }
6610 
6611 static void
6612 wm_nq_start_locked(struct ifnet *ifp)
6613 {
6614 	struct wm_softc *sc = ifp->if_softc;
6615 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6616 
6617 	wm_nq_send_common_locked(ifp, txq, false);
6618 }
6619 
6620 static inline int
6621 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6622 {
6623 	struct wm_softc *sc = ifp->if_softc;
6624 	u_int cpuid = cpu_index(curcpu());
6625 
6626 	/*
6627 	 * Currently, simple distribute strategy.
6628 	 * TODO:
6629 	 * destribute by flowid(RSS has value).
6630 	 */
6631 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6632 }
6633 
6634 static int
6635 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6636 {
6637 	int qid;
6638 	struct wm_softc *sc = ifp->if_softc;
6639 	struct wm_txqueue *txq;
6640 
6641 	qid = wm_nq_select_txqueue(ifp, m);
6642 	txq = &sc->sc_queue[qid].wmq_txq;
6643 
6644 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6645 		m_freem(m);
6646 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6647 		return ENOBUFS;
6648 	}
6649 
6650 	if (mutex_tryenter(txq->txq_lock)) {
6651 		/* XXXX should be per TX queue */
6652 		ifp->if_obytes += m->m_pkthdr.len;
6653 		if (m->m_flags & M_MCAST)
6654 			ifp->if_omcasts++;
6655 
6656 		if (!sc->sc_stopping)
6657 			wm_nq_transmit_locked(ifp, txq);
6658 		mutex_exit(txq->txq_lock);
6659 	}
6660 
6661 	return 0;
6662 }
6663 
6664 static void
6665 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6666 {
6667 
6668 	wm_nq_send_common_locked(ifp, txq, true);
6669 }
6670 
6671 static void
6672 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6673     bool is_transmit)
6674 {
6675 	struct wm_softc *sc = ifp->if_softc;
6676 	struct mbuf *m0;
6677 	struct m_tag *mtag;
6678 	struct wm_txsoft *txs;
6679 	bus_dmamap_t dmamap;
6680 	int error, nexttx, lasttx = -1, seg, segs_needed;
6681 	bool do_csum, sent;
6682 
6683 	KASSERT(mutex_owned(txq->txq_lock));
6684 
6685 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6686 		return;
6687 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6688 		return;
6689 
6690 	sent = false;
6691 
6692 	/*
6693 	 * Loop through the send queue, setting up transmit descriptors
6694 	 * until we drain the queue, or use up all available transmit
6695 	 * descriptors.
6696 	 */
6697 	for (;;) {
6698 		m0 = NULL;
6699 
6700 		/* Get a work queue entry. */
6701 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6702 			wm_txeof(sc, txq);
6703 			if (txq->txq_sfree == 0) {
6704 				DPRINTF(WM_DEBUG_TX,
6705 				    ("%s: TX: no free job descriptors\n",
6706 					device_xname(sc->sc_dev)));
6707 				WM_EVCNT_INCR(&sc->sc_ev_txsstall);
6708 				break;
6709 			}
6710 		}
6711 
6712 		/* Grab a packet off the queue. */
6713 		if (is_transmit)
6714 			m0 = pcq_get(txq->txq_interq);
6715 		else
6716 			IFQ_DEQUEUE(&ifp->if_snd, m0);
6717 		if (m0 == NULL)
6718 			break;
6719 
6720 		DPRINTF(WM_DEBUG_TX,
6721 		    ("%s: TX: have packet to transmit: %p\n",
6722 		    device_xname(sc->sc_dev), m0));
6723 
6724 		txs = &txq->txq_soft[txq->txq_snext];
6725 		dmamap = txs->txs_dmamap;
6726 
6727 		/*
6728 		 * Load the DMA map.  If this fails, the packet either
6729 		 * didn't fit in the allotted number of segments, or we
6730 		 * were short on resources.  For the too-many-segments
6731 		 * case, we simply report an error and drop the packet,
6732 		 * since we can't sanely copy a jumbo packet to a single
6733 		 * buffer.
6734 		 */
6735 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6736 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6737 		if (error) {
6738 			if (error == EFBIG) {
6739 				WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6740 				log(LOG_ERR, "%s: Tx packet consumes too many "
6741 				    "DMA segments, dropping...\n",
6742 				    device_xname(sc->sc_dev));
6743 				wm_dump_mbuf_chain(sc, m0);
6744 				m_freem(m0);
6745 				continue;
6746 			}
6747 			/* Short on resources, just stop for now. */
6748 			DPRINTF(WM_DEBUG_TX,
6749 			    ("%s: TX: dmamap load failed: %d\n",
6750 			    device_xname(sc->sc_dev), error));
6751 			break;
6752 		}
6753 
6754 		segs_needed = dmamap->dm_nsegs;
6755 
6756 		/*
6757 		 * Ensure we have enough descriptors free to describe
6758 		 * the packet.  Note, we always reserve one descriptor
6759 		 * at the end of the ring due to the semantics of the
6760 		 * TDT register, plus one more in the event we need
6761 		 * to load offload context.
6762 		 */
6763 		if (segs_needed > txq->txq_free - 2) {
6764 			/*
6765 			 * Not enough free descriptors to transmit this
6766 			 * packet.  We haven't committed anything yet,
6767 			 * so just unload the DMA map, put the packet
6768 			 * pack on the queue, and punt.  Notify the upper
6769 			 * layer that there are no more slots left.
6770 			 */
6771 			DPRINTF(WM_DEBUG_TX,
6772 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6773 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6774 			    segs_needed, txq->txq_free - 1));
6775 			txq->txq_flags |= WM_TXQ_NO_SPACE;
6776 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6777 			WM_EVCNT_INCR(&sc->sc_ev_txdstall);
6778 			break;
6779 		}
6780 
6781 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6782 
6783 		DPRINTF(WM_DEBUG_TX,
6784 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6785 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6786 
6787 		WM_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);
6788 
6789 		/*
6790 		 * Store a pointer to the packet so that we can free it
6791 		 * later.
6792 		 *
6793 		 * Initially, we consider the number of descriptors the
6794 		 * packet uses the number of DMA segments.  This may be
6795 		 * incremented by 1 if we do checksum offload (a descriptor
6796 		 * is used to set the checksum context).
6797 		 */
6798 		txs->txs_mbuf = m0;
6799 		txs->txs_firstdesc = txq->txq_next;
6800 		txs->txs_ndesc = segs_needed;
6801 
6802 		/* Set up offload parameters for this packet. */
6803 		uint32_t cmdlen, fields, dcmdlen;
6804 		if (m0->m_pkthdr.csum_flags &
6805 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6806 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6807 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6808 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
6809 			    &do_csum) != 0) {
6810 				/* Error message already displayed. */
6811 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6812 				continue;
6813 			}
6814 		} else {
6815 			do_csum = false;
6816 			cmdlen = 0;
6817 			fields = 0;
6818 		}
6819 
6820 		/* Sync the DMA map. */
6821 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6822 		    BUS_DMASYNC_PREWRITE);
6823 
6824 		/* Initialize the first transmit descriptor. */
6825 		nexttx = txq->txq_next;
6826 		if (!do_csum) {
6827 			/* setup a legacy descriptor */
6828 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6829 			    dmamap->dm_segs[0].ds_addr);
6830 			txq->txq_descs[nexttx].wtx_cmdlen =
6831 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6832 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6833 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6834 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6835 			    NULL) {
6836 				txq->txq_descs[nexttx].wtx_cmdlen |=
6837 				    htole32(WTX_CMD_VLE);
6838 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6839 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6840 			} else {
6841 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6842 			}
6843 			dcmdlen = 0;
6844 		} else {
6845 			/* setup an advanced data descriptor */
6846 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6847 			    htole64(dmamap->dm_segs[0].ds_addr);
6848 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6849 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6850 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
6851 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6852 			    htole32(fields);
6853 			DPRINTF(WM_DEBUG_TX,
6854 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6855 			    device_xname(sc->sc_dev), nexttx,
6856 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
6857 			DPRINTF(WM_DEBUG_TX,
6858 			    ("\t 0x%08x%08x\n", fields,
6859 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6860 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6861 		}
6862 
6863 		lasttx = nexttx;
6864 		nexttx = WM_NEXTTX(txq, nexttx);
6865 		/*
6866 		 * fill in the next descriptors. legacy or adcanced format
6867 		 * is the same here
6868 		 */
6869 		for (seg = 1; seg < dmamap->dm_nsegs;
6870 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
6871 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6872 			    htole64(dmamap->dm_segs[seg].ds_addr);
6873 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6874 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
6875 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
6876 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
6877 			lasttx = nexttx;
6878 
6879 			DPRINTF(WM_DEBUG_TX,
6880 			    ("%s: TX: desc %d: %#" PRIx64 ", "
6881 			     "len %#04zx\n",
6882 			    device_xname(sc->sc_dev), nexttx,
6883 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
6884 			    dmamap->dm_segs[seg].ds_len));
6885 		}
6886 
6887 		KASSERT(lasttx != -1);
6888 
6889 		/*
6890 		 * Set up the command byte on the last descriptor of
6891 		 * the packet.  If we're in the interrupt delay window,
6892 		 * delay the interrupt.
6893 		 */
6894 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
6895 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
6896 		txq->txq_descs[lasttx].wtx_cmdlen |=
6897 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6898 
6899 		txs->txs_lastdesc = lasttx;
6900 
6901 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
6902 		    device_xname(sc->sc_dev),
6903 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6904 
6905 		/* Sync the descriptors we're using. */
6906 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6907 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6908 
6909 		/* Give the packet to the chip. */
6910 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6911 		sent = true;
6912 
6913 		DPRINTF(WM_DEBUG_TX,
6914 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6915 
6916 		DPRINTF(WM_DEBUG_TX,
6917 		    ("%s: TX: finished transmitting packet, job %d\n",
6918 		    device_xname(sc->sc_dev), txq->txq_snext));
6919 
6920 		/* Advance the tx pointer. */
6921 		txq->txq_free -= txs->txs_ndesc;
6922 		txq->txq_next = nexttx;
6923 
6924 		txq->txq_sfree--;
6925 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6926 
6927 		/* Pass the packet to any BPF listeners. */
6928 		bpf_mtap(ifp, m0);
6929 	}
6930 
6931 	if (m0 != NULL) {
6932 		txq->txq_flags |= WM_TXQ_NO_SPACE;
6933 		WM_EVCNT_INCR(&sc->sc_ev_txdrop);
6934 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6935 			__func__));
6936 		m_freem(m0);
6937 	}
6938 
6939 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6940 		/* No more slots; notify upper layer. */
6941 		txq->txq_flags |= WM_TXQ_NO_SPACE;
6942 	}
6943 
6944 	if (sent) {
6945 		/* Set a watchdog timer in case the chip flakes out. */
6946 		ifp->if_timer = 5;
6947 	}
6948 }
6949 
6950 /* Interrupt */
6951 
6952 /*
6953  * wm_txeof:
6954  *
6955  *	Helper; handle transmit interrupts.
6956  */
6957 static int
6958 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
6959 {
6960 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
6961 	struct wm_txsoft *txs;
6962 	bool processed = false;
6963 	int count = 0;
6964 	int i;
6965 	uint8_t status;
6966 
6967 	KASSERT(mutex_owned(txq->txq_lock));
6968 
6969 	if (sc->sc_stopping)
6970 		return 0;
6971 
6972 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6973 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
6974 	else
6975 		ifp->if_flags &= ~IFF_OACTIVE;
6976 
6977 	/*
6978 	 * Go through the Tx list and free mbufs for those
6979 	 * frames which have been transmitted.
6980 	 */
6981 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
6982 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
6983 		txs = &txq->txq_soft[i];
6984 
6985 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
6986 			device_xname(sc->sc_dev), i));
6987 
6988 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
6989 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
6990 
6991 		status =
6992 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
6993 		if ((status & WTX_ST_DD) == 0) {
6994 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
6995 			    BUS_DMASYNC_PREREAD);
6996 			break;
6997 		}
6998 
6999 		processed = true;
7000 		count++;
7001 		DPRINTF(WM_DEBUG_TX,
7002 		    ("%s: TX: job %d done: descs %d..%d\n",
7003 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7004 		    txs->txs_lastdesc));
7005 
7006 		/*
7007 		 * XXX We should probably be using the statistics
7008 		 * XXX registers, but I don't know if they exist
7009 		 * XXX on chips before the i82544.
7010 		 */
7011 
7012 #ifdef WM_EVENT_COUNTERS
7013 		if (status & WTX_ST_TU)
7014 			WM_EVCNT_INCR(&sc->sc_ev_tu);
7015 #endif /* WM_EVENT_COUNTERS */
7016 
7017 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
7018 			ifp->if_oerrors++;
7019 			if (status & WTX_ST_LC)
7020 				log(LOG_WARNING, "%s: late collision\n",
7021 				    device_xname(sc->sc_dev));
7022 			else if (status & WTX_ST_EC) {
7023 				ifp->if_collisions += 16;
7024 				log(LOG_WARNING, "%s: excessive collisions\n",
7025 				    device_xname(sc->sc_dev));
7026 			}
7027 		} else
7028 			ifp->if_opackets++;
7029 
7030 		txq->txq_free += txs->txs_ndesc;
7031 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7032 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7033 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7034 		m_freem(txs->txs_mbuf);
7035 		txs->txs_mbuf = NULL;
7036 	}
7037 
7038 	/* Update the dirty transmit buffer pointer. */
7039 	txq->txq_sdirty = i;
7040 	DPRINTF(WM_DEBUG_TX,
7041 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7042 
7043 	if (count != 0)
7044 		rnd_add_uint32(&sc->rnd_source, count);
7045 
7046 	/*
7047 	 * If there are no more pending transmissions, cancel the watchdog
7048 	 * timer.
7049 	 */
7050 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7051 		ifp->if_timer = 0;
7052 
7053 	return processed;
7054 }
7055 
7056 /*
7057  * wm_rxeof:
7058  *
7059  *	Helper; handle receive interrupts.
7060  */
7061 static void
7062 wm_rxeof(struct wm_rxqueue *rxq)
7063 {
7064 	struct wm_softc *sc = rxq->rxq_sc;
7065 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7066 	struct wm_rxsoft *rxs;
7067 	struct mbuf *m;
7068 	int i, len;
7069 	int count = 0;
7070 	uint8_t status, errors;
7071 	uint16_t vlantag;
7072 
7073 	KASSERT(mutex_owned(rxq->rxq_lock));
7074 
7075 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7076 		rxs = &rxq->rxq_soft[i];
7077 
7078 		DPRINTF(WM_DEBUG_RX,
7079 		    ("%s: RX: checking descriptor %d\n",
7080 		    device_xname(sc->sc_dev), i));
7081 
7082 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7083 
7084 		status = rxq->rxq_descs[i].wrx_status;
7085 		errors = rxq->rxq_descs[i].wrx_errors;
7086 		len = le16toh(rxq->rxq_descs[i].wrx_len);
7087 		vlantag = rxq->rxq_descs[i].wrx_special;
7088 
7089 		if ((status & WRX_ST_DD) == 0) {
7090 			/* We have processed all of the receive descriptors. */
7091 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7092 			break;
7093 		}
7094 
7095 		count++;
7096 		if (__predict_false(rxq->rxq_discard)) {
7097 			DPRINTF(WM_DEBUG_RX,
7098 			    ("%s: RX: discarding contents of descriptor %d\n",
7099 			    device_xname(sc->sc_dev), i));
7100 			wm_init_rxdesc(rxq, i);
7101 			if (status & WRX_ST_EOP) {
7102 				/* Reset our state. */
7103 				DPRINTF(WM_DEBUG_RX,
7104 				    ("%s: RX: resetting rxdiscard -> 0\n",
7105 				    device_xname(sc->sc_dev)));
7106 				rxq->rxq_discard = 0;
7107 			}
7108 			continue;
7109 		}
7110 
7111 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7112 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7113 
7114 		m = rxs->rxs_mbuf;
7115 
7116 		/*
7117 		 * Add a new receive buffer to the ring, unless of
7118 		 * course the length is zero. Treat the latter as a
7119 		 * failed mapping.
7120 		 */
7121 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7122 			/*
7123 			 * Failed, throw away what we've done so
7124 			 * far, and discard the rest of the packet.
7125 			 */
7126 			ifp->if_ierrors++;
7127 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7128 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7129 			wm_init_rxdesc(rxq, i);
7130 			if ((status & WRX_ST_EOP) == 0)
7131 				rxq->rxq_discard = 1;
7132 			if (rxq->rxq_head != NULL)
7133 				m_freem(rxq->rxq_head);
7134 			WM_RXCHAIN_RESET(rxq);
7135 			DPRINTF(WM_DEBUG_RX,
7136 			    ("%s: RX: Rx buffer allocation failed, "
7137 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7138 			    rxq->rxq_discard ? " (discard)" : ""));
7139 			continue;
7140 		}
7141 
7142 		m->m_len = len;
7143 		rxq->rxq_len += len;
7144 		DPRINTF(WM_DEBUG_RX,
7145 		    ("%s: RX: buffer at %p len %d\n",
7146 		    device_xname(sc->sc_dev), m->m_data, len));
7147 
7148 		/* If this is not the end of the packet, keep looking. */
7149 		if ((status & WRX_ST_EOP) == 0) {
7150 			WM_RXCHAIN_LINK(rxq, m);
7151 			DPRINTF(WM_DEBUG_RX,
7152 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7153 			    device_xname(sc->sc_dev), rxq->rxq_len));
7154 			continue;
7155 		}
7156 
7157 		/*
7158 		 * Okay, we have the entire packet now.  The chip is
7159 		 * configured to include the FCS except I350 and I21[01]
7160 		 * (not all chips can be configured to strip it),
7161 		 * so we need to trim it.
7162 		 * May need to adjust length of previous mbuf in the
7163 		 * chain if the current mbuf is too short.
7164 		 * For an eratta, the RCTL_SECRC bit in RCTL register
7165 		 * is always set in I350, so we don't trim it.
7166 		 */
7167 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7168 		    && (sc->sc_type != WM_T_I210)
7169 		    && (sc->sc_type != WM_T_I211)) {
7170 			if (m->m_len < ETHER_CRC_LEN) {
7171 				rxq->rxq_tail->m_len
7172 				    -= (ETHER_CRC_LEN - m->m_len);
7173 				m->m_len = 0;
7174 			} else
7175 				m->m_len -= ETHER_CRC_LEN;
7176 			len = rxq->rxq_len - ETHER_CRC_LEN;
7177 		} else
7178 			len = rxq->rxq_len;
7179 
7180 		WM_RXCHAIN_LINK(rxq, m);
7181 
7182 		*rxq->rxq_tailp = NULL;
7183 		m = rxq->rxq_head;
7184 
7185 		WM_RXCHAIN_RESET(rxq);
7186 
7187 		DPRINTF(WM_DEBUG_RX,
7188 		    ("%s: RX: have entire packet, len -> %d\n",
7189 		    device_xname(sc->sc_dev), len));
7190 
7191 		/* If an error occurred, update stats and drop the packet. */
7192 		if (errors &
7193 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7194 			if (errors & WRX_ER_SE)
7195 				log(LOG_WARNING, "%s: symbol error\n",
7196 				    device_xname(sc->sc_dev));
7197 			else if (errors & WRX_ER_SEQ)
7198 				log(LOG_WARNING, "%s: receive sequence error\n",
7199 				    device_xname(sc->sc_dev));
7200 			else if (errors & WRX_ER_CE)
7201 				log(LOG_WARNING, "%s: CRC error\n",
7202 				    device_xname(sc->sc_dev));
7203 			m_freem(m);
7204 			continue;
7205 		}
7206 
7207 		/* No errors.  Receive the packet. */
7208 		m_set_rcvif(m, ifp);
7209 		m->m_pkthdr.len = len;
7210 
7211 		/*
7212 		 * If VLANs are enabled, VLAN packets have been unwrapped
7213 		 * for us.  Associate the tag with the packet.
7214 		 */
7215 		/* XXXX should check for i350 and i354 */
7216 		if ((status & WRX_ST_VP) != 0) {
7217 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7218 		}
7219 
7220 		/* Set up checksum info for this packet. */
7221 		if ((status & WRX_ST_IXSM) == 0) {
7222 			if (status & WRX_ST_IPCS) {
7223 				WM_EVCNT_INCR(&sc->sc_ev_rxipsum);
7224 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7225 				if (errors & WRX_ER_IPE)
7226 					m->m_pkthdr.csum_flags |=
7227 					    M_CSUM_IPv4_BAD;
7228 			}
7229 			if (status & WRX_ST_TCPCS) {
7230 				/*
7231 				 * Note: we don't know if this was TCP or UDP,
7232 				 * so we just set both bits, and expect the
7233 				 * upper layers to deal.
7234 				 */
7235 				WM_EVCNT_INCR(&sc->sc_ev_rxtusum);
7236 				m->m_pkthdr.csum_flags |=
7237 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7238 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7239 				if (errors & WRX_ER_TCPE)
7240 					m->m_pkthdr.csum_flags |=
7241 					    M_CSUM_TCP_UDP_BAD;
7242 			}
7243 		}
7244 
7245 		ifp->if_ipackets++;
7246 
7247 		mutex_exit(rxq->rxq_lock);
7248 
7249 		/* Pass this up to any BPF listeners. */
7250 		bpf_mtap(ifp, m);
7251 
7252 		/* Pass it on. */
7253 		if_percpuq_enqueue(sc->sc_ipq, m);
7254 
7255 		mutex_enter(rxq->rxq_lock);
7256 
7257 		if (sc->sc_stopping)
7258 			break;
7259 	}
7260 
7261 	/* Update the receive pointer. */
7262 	rxq->rxq_ptr = i;
7263 	if (count != 0)
7264 		rnd_add_uint32(&sc->rnd_source, count);
7265 
7266 	DPRINTF(WM_DEBUG_RX,
7267 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7268 }
7269 
7270 /*
7271  * wm_linkintr_gmii:
7272  *
7273  *	Helper; handle link interrupts for GMII.
7274  */
7275 static void
7276 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7277 {
7278 
7279 	KASSERT(WM_CORE_LOCKED(sc));
7280 
7281 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7282 		__func__));
7283 
7284 	if (icr & ICR_LSC) {
7285 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7286 
7287 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7288 			wm_gig_downshift_workaround_ich8lan(sc);
7289 
7290 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7291 			device_xname(sc->sc_dev)));
7292 		mii_pollstat(&sc->sc_mii);
7293 		if (sc->sc_type == WM_T_82543) {
7294 			int miistatus, active;
7295 
7296 			/*
7297 			 * With 82543, we need to force speed and
7298 			 * duplex on the MAC equal to what the PHY
7299 			 * speed and duplex configuration is.
7300 			 */
7301 			miistatus = sc->sc_mii.mii_media_status;
7302 
7303 			if (miistatus & IFM_ACTIVE) {
7304 				active = sc->sc_mii.mii_media_active;
7305 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7306 				switch (IFM_SUBTYPE(active)) {
7307 				case IFM_10_T:
7308 					sc->sc_ctrl |= CTRL_SPEED_10;
7309 					break;
7310 				case IFM_100_TX:
7311 					sc->sc_ctrl |= CTRL_SPEED_100;
7312 					break;
7313 				case IFM_1000_T:
7314 					sc->sc_ctrl |= CTRL_SPEED_1000;
7315 					break;
7316 				default:
7317 					/*
7318 					 * fiber?
7319 					 * Shoud not enter here.
7320 					 */
7321 					printf("unknown media (%x)\n", active);
7322 					break;
7323 				}
7324 				if (active & IFM_FDX)
7325 					sc->sc_ctrl |= CTRL_FD;
7326 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7327 			}
7328 		} else if ((sc->sc_type == WM_T_ICH8)
7329 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7330 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7331 		} else if (sc->sc_type == WM_T_PCH) {
7332 			wm_k1_gig_workaround_hv(sc,
7333 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7334 		}
7335 
7336 		if ((sc->sc_phytype == WMPHY_82578)
7337 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7338 			== IFM_1000_T)) {
7339 
7340 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7341 				delay(200*1000); /* XXX too big */
7342 
7343 				/* Link stall fix for link up */
7344 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7345 				    HV_MUX_DATA_CTRL,
7346 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7347 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7348 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7349 				    HV_MUX_DATA_CTRL,
7350 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7351 			}
7352 		}
7353 	} else if (icr & ICR_RXSEQ) {
7354 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7355 			device_xname(sc->sc_dev)));
7356 	}
7357 }
7358 
7359 /*
7360  * wm_linkintr_tbi:
7361  *
7362  *	Helper; handle link interrupts for TBI mode.
7363  */
7364 static void
7365 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7366 {
7367 	uint32_t status;
7368 
7369 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7370 		__func__));
7371 
7372 	status = CSR_READ(sc, WMREG_STATUS);
7373 	if (icr & ICR_LSC) {
7374 		if (status & STATUS_LU) {
7375 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7376 			    device_xname(sc->sc_dev),
7377 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7378 			/*
7379 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7380 			 * so we should update sc->sc_ctrl
7381 			 */
7382 
7383 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7384 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7385 			sc->sc_fcrtl &= ~FCRTL_XONE;
7386 			if (status & STATUS_FD)
7387 				sc->sc_tctl |=
7388 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7389 			else
7390 				sc->sc_tctl |=
7391 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7392 			if (sc->sc_ctrl & CTRL_TFCE)
7393 				sc->sc_fcrtl |= FCRTL_XONE;
7394 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7395 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7396 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7397 				      sc->sc_fcrtl);
7398 			sc->sc_tbi_linkup = 1;
7399 		} else {
7400 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7401 			    device_xname(sc->sc_dev)));
7402 			sc->sc_tbi_linkup = 0;
7403 		}
7404 		/* Update LED */
7405 		wm_tbi_serdes_set_linkled(sc);
7406 	} else if (icr & ICR_RXSEQ) {
7407 		DPRINTF(WM_DEBUG_LINK,
7408 		    ("%s: LINK: Receive sequence error\n",
7409 		    device_xname(sc->sc_dev)));
7410 	}
7411 }
7412 
7413 /*
7414  * wm_linkintr_serdes:
7415  *
7416  *	Helper; handle link interrupts for TBI mode.
7417  */
7418 static void
7419 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7420 {
7421 	struct mii_data *mii = &sc->sc_mii;
7422 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7423 	uint32_t pcs_adv, pcs_lpab, reg;
7424 
7425 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7426 		__func__));
7427 
7428 	if (icr & ICR_LSC) {
7429 		/* Check PCS */
7430 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7431 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7432 			mii->mii_media_status |= IFM_ACTIVE;
7433 			sc->sc_tbi_linkup = 1;
7434 		} else {
7435 			mii->mii_media_status |= IFM_NONE;
7436 			sc->sc_tbi_linkup = 0;
7437 			wm_tbi_serdes_set_linkled(sc);
7438 			return;
7439 		}
7440 		mii->mii_media_active |= IFM_1000_SX;
7441 		if ((reg & PCS_LSTS_FDX) != 0)
7442 			mii->mii_media_active |= IFM_FDX;
7443 		else
7444 			mii->mii_media_active |= IFM_HDX;
7445 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7446 			/* Check flow */
7447 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7448 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7449 				DPRINTF(WM_DEBUG_LINK,
7450 				    ("XXX LINKOK but not ACOMP\n"));
7451 				return;
7452 			}
7453 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7454 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7455 			DPRINTF(WM_DEBUG_LINK,
7456 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7457 			if ((pcs_adv & TXCW_SYM_PAUSE)
7458 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7459 				mii->mii_media_active |= IFM_FLOW
7460 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7461 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7462 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7463 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7464 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7465 				mii->mii_media_active |= IFM_FLOW
7466 				    | IFM_ETH_TXPAUSE;
7467 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7468 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7469 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7470 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7471 				mii->mii_media_active |= IFM_FLOW
7472 				    | IFM_ETH_RXPAUSE;
7473 		}
7474 		/* Update LED */
7475 		wm_tbi_serdes_set_linkled(sc);
7476 	} else {
7477 		DPRINTF(WM_DEBUG_LINK,
7478 		    ("%s: LINK: Receive sequence error\n",
7479 		    device_xname(sc->sc_dev)));
7480 	}
7481 }
7482 
7483 /*
7484  * wm_linkintr:
7485  *
7486  *	Helper; handle link interrupts.
7487  */
7488 static void
7489 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7490 {
7491 
7492 	KASSERT(WM_CORE_LOCKED(sc));
7493 
7494 	if (sc->sc_flags & WM_F_HAS_MII)
7495 		wm_linkintr_gmii(sc, icr);
7496 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7497 	    && (sc->sc_type >= WM_T_82575))
7498 		wm_linkintr_serdes(sc, icr);
7499 	else
7500 		wm_linkintr_tbi(sc, icr);
7501 }
7502 
7503 /*
7504  * wm_intr_legacy:
7505  *
7506  *	Interrupt service routine for INTx and MSI.
7507  */
7508 static int
7509 wm_intr_legacy(void *arg)
7510 {
7511 	struct wm_softc *sc = arg;
7512 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7513 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7514 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7515 	uint32_t icr, rndval = 0;
7516 	int handled = 0;
7517 
7518 	DPRINTF(WM_DEBUG_TX,
7519 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7520 	while (1 /* CONSTCOND */) {
7521 		icr = CSR_READ(sc, WMREG_ICR);
7522 		if ((icr & sc->sc_icr) == 0)
7523 			break;
7524 		if (rndval == 0)
7525 			rndval = icr;
7526 
7527 		mutex_enter(rxq->rxq_lock);
7528 
7529 		if (sc->sc_stopping) {
7530 			mutex_exit(rxq->rxq_lock);
7531 			break;
7532 		}
7533 
7534 		handled = 1;
7535 
7536 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7537 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7538 			DPRINTF(WM_DEBUG_RX,
7539 			    ("%s: RX: got Rx intr 0x%08x\n",
7540 			    device_xname(sc->sc_dev),
7541 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7542 			WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7543 		}
7544 #endif
7545 		wm_rxeof(rxq);
7546 
7547 		mutex_exit(rxq->rxq_lock);
7548 		mutex_enter(txq->txq_lock);
7549 
7550 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7551 		if (icr & ICR_TXDW) {
7552 			DPRINTF(WM_DEBUG_TX,
7553 			    ("%s: TX: got TXDW interrupt\n",
7554 			    device_xname(sc->sc_dev)));
7555 			WM_EVCNT_INCR(&sc->sc_ev_txdw);
7556 		}
7557 #endif
7558 		wm_txeof(sc, txq);
7559 
7560 		mutex_exit(txq->txq_lock);
7561 		WM_CORE_LOCK(sc);
7562 
7563 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7564 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7565 			wm_linkintr(sc, icr);
7566 		}
7567 
7568 		WM_CORE_UNLOCK(sc);
7569 
7570 		if (icr & ICR_RXO) {
7571 #if defined(WM_DEBUG)
7572 			log(LOG_WARNING, "%s: Receive overrun\n",
7573 			    device_xname(sc->sc_dev));
7574 #endif /* defined(WM_DEBUG) */
7575 		}
7576 	}
7577 
7578 	rnd_add_uint32(&sc->rnd_source, rndval);
7579 
7580 	if (handled) {
7581 		/* Try to get more packets going. */
7582 		ifp->if_start(ifp);
7583 	}
7584 
7585 	return handled;
7586 }
7587 
7588 static int
7589 wm_txrxintr_msix(void *arg)
7590 {
7591 	struct wm_queue *wmq = arg;
7592 	struct wm_txqueue *txq = &wmq->wmq_txq;
7593 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7594 	struct wm_softc *sc = txq->txq_sc;
7595 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7596 
7597 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7598 
7599 	DPRINTF(WM_DEBUG_TX,
7600 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7601 
7602 	if (sc->sc_type == WM_T_82574)
7603 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7604 	else if (sc->sc_type == WM_T_82575)
7605 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7606 	else
7607 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
7608 
7609 	if (!sc->sc_stopping) {
7610 		mutex_enter(txq->txq_lock);
7611 
7612 		WM_EVCNT_INCR(&sc->sc_ev_txdw);
7613 		wm_txeof(sc, txq);
7614 
7615 		/* Try to get more packets going. */
7616 		if (pcq_peek(txq->txq_interq) != NULL)
7617 			wm_nq_transmit_locked(ifp, txq);
7618 		/*
7619 		 * There are still some upper layer processing which call
7620 		 * ifp->if_start(). e.g. ALTQ
7621 		 */
7622 		if (wmq->wmq_id == 0) {
7623 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
7624 				wm_nq_start_locked(ifp);
7625 		}
7626 		mutex_exit(txq->txq_lock);
7627 	}
7628 
7629 	DPRINTF(WM_DEBUG_RX,
7630 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7631 
7632 	if (!sc->sc_stopping) {
7633 		mutex_enter(rxq->rxq_lock);
7634 		WM_EVCNT_INCR(&sc->sc_ev_rxintr);
7635 		wm_rxeof(rxq);
7636 		mutex_exit(rxq->rxq_lock);
7637 	}
7638 
7639 	if (sc->sc_type == WM_T_82574)
7640 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7641 	else if (sc->sc_type == WM_T_82575)
7642 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7643 	else
7644 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7645 
7646 	return 1;
7647 }
7648 
7649 /*
7650  * wm_linkintr_msix:
7651  *
7652  *	Interrupt service routine for link status change for MSI-X.
7653  */
7654 static int
7655 wm_linkintr_msix(void *arg)
7656 {
7657 	struct wm_softc *sc = arg;
7658 	uint32_t reg;
7659 
7660 	DPRINTF(WM_DEBUG_LINK,
7661 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7662 
7663 	reg = CSR_READ(sc, WMREG_ICR);
7664 	WM_CORE_LOCK(sc);
7665 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7666 		goto out;
7667 
7668 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7669 	wm_linkintr(sc, ICR_LSC);
7670 
7671 out:
7672 	WM_CORE_UNLOCK(sc);
7673 
7674 	if (sc->sc_type == WM_T_82574)
7675 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7676 	else if (sc->sc_type == WM_T_82575)
7677 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7678 	else
7679 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7680 
7681 	return 1;
7682 }
7683 
7684 /*
7685  * Media related.
7686  * GMII, SGMII, TBI (and SERDES)
7687  */
7688 
7689 /* Common */
7690 
7691 /*
7692  * wm_tbi_serdes_set_linkled:
7693  *
7694  *	Update the link LED on TBI and SERDES devices.
7695  */
7696 static void
7697 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7698 {
7699 
7700 	if (sc->sc_tbi_linkup)
7701 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7702 	else
7703 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7704 
7705 	/* 82540 or newer devices are active low */
7706 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7707 
7708 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7709 }
7710 
7711 /* GMII related */
7712 
7713 /*
7714  * wm_gmii_reset:
7715  *
7716  *	Reset the PHY.
7717  */
7718 static void
7719 wm_gmii_reset(struct wm_softc *sc)
7720 {
7721 	uint32_t reg;
7722 	int rv;
7723 
7724 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7725 		device_xname(sc->sc_dev), __func__));
7726 	/* get phy semaphore */
7727 	switch (sc->sc_type) {
7728 	case WM_T_82571:
7729 	case WM_T_82572:
7730 	case WM_T_82573:
7731 	case WM_T_82574:
7732 	case WM_T_82583:
7733 		 /* XXX should get sw semaphore, too */
7734 		rv = wm_get_swsm_semaphore(sc);
7735 		break;
7736 	case WM_T_82575:
7737 	case WM_T_82576:
7738 	case WM_T_82580:
7739 	case WM_T_I350:
7740 	case WM_T_I354:
7741 	case WM_T_I210:
7742 	case WM_T_I211:
7743 	case WM_T_80003:
7744 		rv = wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7745 		break;
7746 	case WM_T_ICH8:
7747 	case WM_T_ICH9:
7748 	case WM_T_ICH10:
7749 	case WM_T_PCH:
7750 	case WM_T_PCH2:
7751 	case WM_T_PCH_LPT:
7752 	case WM_T_PCH_SPT:
7753 		rv = wm_get_swfwhw_semaphore(sc);
7754 		break;
7755 	default:
7756 		/* nothing to do*/
7757 		rv = 0;
7758 		break;
7759 	}
7760 	if (rv != 0) {
7761 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7762 		    __func__);
7763 		return;
7764 	}
7765 
7766 	switch (sc->sc_type) {
7767 	case WM_T_82542_2_0:
7768 	case WM_T_82542_2_1:
7769 		/* null */
7770 		break;
7771 	case WM_T_82543:
7772 		/*
7773 		 * With 82543, we need to force speed and duplex on the MAC
7774 		 * equal to what the PHY speed and duplex configuration is.
7775 		 * In addition, we need to perform a hardware reset on the PHY
7776 		 * to take it out of reset.
7777 		 */
7778 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7779 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7780 
7781 		/* The PHY reset pin is active-low. */
7782 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7783 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7784 		    CTRL_EXT_SWDPIN(4));
7785 		reg |= CTRL_EXT_SWDPIO(4);
7786 
7787 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7788 		CSR_WRITE_FLUSH(sc);
7789 		delay(10*1000);
7790 
7791 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7792 		CSR_WRITE_FLUSH(sc);
7793 		delay(150);
7794 #if 0
7795 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7796 #endif
7797 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
7798 		break;
7799 	case WM_T_82544:	/* reset 10000us */
7800 	case WM_T_82540:
7801 	case WM_T_82545:
7802 	case WM_T_82545_3:
7803 	case WM_T_82546:
7804 	case WM_T_82546_3:
7805 	case WM_T_82541:
7806 	case WM_T_82541_2:
7807 	case WM_T_82547:
7808 	case WM_T_82547_2:
7809 	case WM_T_82571:	/* reset 100us */
7810 	case WM_T_82572:
7811 	case WM_T_82573:
7812 	case WM_T_82574:
7813 	case WM_T_82575:
7814 	case WM_T_82576:
7815 	case WM_T_82580:
7816 	case WM_T_I350:
7817 	case WM_T_I354:
7818 	case WM_T_I210:
7819 	case WM_T_I211:
7820 	case WM_T_82583:
7821 	case WM_T_80003:
7822 		/* generic reset */
7823 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7824 		CSR_WRITE_FLUSH(sc);
7825 		delay(20000);
7826 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7827 		CSR_WRITE_FLUSH(sc);
7828 		delay(20000);
7829 
7830 		if ((sc->sc_type == WM_T_82541)
7831 		    || (sc->sc_type == WM_T_82541_2)
7832 		    || (sc->sc_type == WM_T_82547)
7833 		    || (sc->sc_type == WM_T_82547_2)) {
7834 			/* workaround for igp are done in igp_reset() */
7835 			/* XXX add code to set LED after phy reset */
7836 		}
7837 		break;
7838 	case WM_T_ICH8:
7839 	case WM_T_ICH9:
7840 	case WM_T_ICH10:
7841 	case WM_T_PCH:
7842 	case WM_T_PCH2:
7843 	case WM_T_PCH_LPT:
7844 	case WM_T_PCH_SPT:
7845 		/* generic reset */
7846 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7847 		CSR_WRITE_FLUSH(sc);
7848 		delay(100);
7849 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7850 		CSR_WRITE_FLUSH(sc);
7851 		delay(150);
7852 		break;
7853 	default:
7854 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7855 		    __func__);
7856 		break;
7857 	}
7858 
7859 	/* release PHY semaphore */
7860 	switch (sc->sc_type) {
7861 	case WM_T_82571:
7862 	case WM_T_82572:
7863 	case WM_T_82573:
7864 	case WM_T_82574:
7865 	case WM_T_82583:
7866 		 /* XXX should put sw semaphore, too */
7867 		wm_put_swsm_semaphore(sc);
7868 		break;
7869 	case WM_T_82575:
7870 	case WM_T_82576:
7871 	case WM_T_82580:
7872 	case WM_T_I350:
7873 	case WM_T_I354:
7874 	case WM_T_I210:
7875 	case WM_T_I211:
7876 	case WM_T_80003:
7877 		wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
7878 		break;
7879 	case WM_T_ICH8:
7880 	case WM_T_ICH9:
7881 	case WM_T_ICH10:
7882 	case WM_T_PCH:
7883 	case WM_T_PCH2:
7884 	case WM_T_PCH_LPT:
7885 	case WM_T_PCH_SPT:
7886 		wm_put_swfwhw_semaphore(sc);
7887 		break;
7888 	default:
7889 		/* nothing to do */
7890 		rv = 0;
7891 		break;
7892 	}
7893 
7894 	/* get_cfg_done */
7895 	wm_get_cfg_done(sc);
7896 
7897 	/* extra setup */
7898 	switch (sc->sc_type) {
7899 	case WM_T_82542_2_0:
7900 	case WM_T_82542_2_1:
7901 	case WM_T_82543:
7902 	case WM_T_82544:
7903 	case WM_T_82540:
7904 	case WM_T_82545:
7905 	case WM_T_82545_3:
7906 	case WM_T_82546:
7907 	case WM_T_82546_3:
7908 	case WM_T_82541_2:
7909 	case WM_T_82547_2:
7910 	case WM_T_82571:
7911 	case WM_T_82572:
7912 	case WM_T_82573:
7913 	case WM_T_82575:
7914 	case WM_T_82576:
7915 	case WM_T_82580:
7916 	case WM_T_I350:
7917 	case WM_T_I354:
7918 	case WM_T_I210:
7919 	case WM_T_I211:
7920 	case WM_T_80003:
7921 		/* null */
7922 		break;
7923 	case WM_T_82574:
7924 	case WM_T_82583:
7925 		wm_lplu_d0_disable(sc);
7926 		break;
7927 	case WM_T_82541:
7928 	case WM_T_82547:
7929 		/* XXX Configure actively LED after PHY reset */
7930 		break;
7931 	case WM_T_ICH8:
7932 	case WM_T_ICH9:
7933 	case WM_T_ICH10:
7934 	case WM_T_PCH:
7935 	case WM_T_PCH2:
7936 	case WM_T_PCH_LPT:
7937 	case WM_T_PCH_SPT:
7938 		/* Allow time for h/w to get to a quiescent state afer reset */
7939 		delay(10*1000);
7940 
7941 		if (sc->sc_type == WM_T_PCH)
7942 			wm_hv_phy_workaround_ich8lan(sc);
7943 
7944 		if (sc->sc_type == WM_T_PCH2)
7945 			wm_lv_phy_workaround_ich8lan(sc);
7946 
7947 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
7948 			/*
7949 			 * dummy read to clear the phy wakeup bit after lcd
7950 			 * reset
7951 			 */
7952 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
7953 		}
7954 
7955 		/*
7956 		 * XXX Configure the LCD with th extended configuration region
7957 		 * in NVM
7958 		 */
7959 
7960 		/* Disable D0 LPLU. */
7961 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
7962 			wm_lplu_d0_disable_pch(sc);
7963 		else
7964 			wm_lplu_d0_disable(sc);	/* ICH* */
7965 		break;
7966 	default:
7967 		panic("%s: unknown type\n", __func__);
7968 		break;
7969 	}
7970 }
7971 
7972 /*
7973  * wm_get_phy_id_82575:
7974  *
7975  * Return PHY ID. Return -1 if it failed.
7976  */
7977 static int
7978 wm_get_phy_id_82575(struct wm_softc *sc)
7979 {
7980 	uint32_t reg;
7981 	int phyid = -1;
7982 
7983 	/* XXX */
7984 	if ((sc->sc_flags & WM_F_SGMII) == 0)
7985 		return -1;
7986 
7987 	if (wm_sgmii_uses_mdio(sc)) {
7988 		switch (sc->sc_type) {
7989 		case WM_T_82575:
7990 		case WM_T_82576:
7991 			reg = CSR_READ(sc, WMREG_MDIC);
7992 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
7993 			break;
7994 		case WM_T_82580:
7995 		case WM_T_I350:
7996 		case WM_T_I354:
7997 		case WM_T_I210:
7998 		case WM_T_I211:
7999 			reg = CSR_READ(sc, WMREG_MDICNFG);
8000 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8001 			break;
8002 		default:
8003 			return -1;
8004 		}
8005 	}
8006 
8007 	return phyid;
8008 }
8009 
8010 
8011 /*
8012  * wm_gmii_mediainit:
8013  *
8014  *	Initialize media for use on 1000BASE-T devices.
8015  */
8016 static void
8017 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8018 {
8019 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8020 	struct mii_data *mii = &sc->sc_mii;
8021 	uint32_t reg;
8022 
8023 	/* We have GMII. */
8024 	sc->sc_flags |= WM_F_HAS_MII;
8025 
8026 	if (sc->sc_type == WM_T_80003)
8027 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8028 	else
8029 		sc->sc_tipg = TIPG_1000T_DFLT;
8030 
8031 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8032 	if ((sc->sc_type == WM_T_82580)
8033 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8034 	    || (sc->sc_type == WM_T_I211)) {
8035 		reg = CSR_READ(sc, WMREG_PHPM);
8036 		reg &= ~PHPM_GO_LINK_D;
8037 		CSR_WRITE(sc, WMREG_PHPM, reg);
8038 	}
8039 
8040 	/*
8041 	 * Let the chip set speed/duplex on its own based on
8042 	 * signals from the PHY.
8043 	 * XXXbouyer - I'm not sure this is right for the 80003,
8044 	 * the em driver only sets CTRL_SLU here - but it seems to work.
8045 	 */
8046 	sc->sc_ctrl |= CTRL_SLU;
8047 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8048 
8049 	/* Initialize our media structures and probe the GMII. */
8050 	mii->mii_ifp = ifp;
8051 
8052 	/*
8053 	 * Determine the PHY access method.
8054 	 *
8055 	 *  For SGMII, use SGMII specific method.
8056 	 *
8057 	 *  For some devices, we can determine the PHY access method
8058 	 * from sc_type.
8059 	 *
8060 	 *  For ICH and PCH variants, it's difficult to determine the PHY
8061 	 * access  method by sc_type, so use the PCI product ID for some
8062 	 * devices.
8063 	 * For other ICH8 variants, try to use igp's method. If the PHY
8064 	 * can't detect, then use bm's method.
8065 	 */
8066 	switch (prodid) {
8067 	case PCI_PRODUCT_INTEL_PCH_M_LM:
8068 	case PCI_PRODUCT_INTEL_PCH_M_LC:
8069 		/* 82577 */
8070 		sc->sc_phytype = WMPHY_82577;
8071 		break;
8072 	case PCI_PRODUCT_INTEL_PCH_D_DM:
8073 	case PCI_PRODUCT_INTEL_PCH_D_DC:
8074 		/* 82578 */
8075 		sc->sc_phytype = WMPHY_82578;
8076 		break;
8077 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8078 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
8079 		/* 82579 */
8080 		sc->sc_phytype = WMPHY_82579;
8081 		break;
8082 	case PCI_PRODUCT_INTEL_82801I_BM:
8083 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8084 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8085 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8086 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8087 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8088 		/* 82567 */
8089 		sc->sc_phytype = WMPHY_BM;
8090 		mii->mii_readreg = wm_gmii_bm_readreg;
8091 		mii->mii_writereg = wm_gmii_bm_writereg;
8092 		break;
8093 	default:
8094 		if (((sc->sc_flags & WM_F_SGMII) != 0)
8095 		    && !wm_sgmii_uses_mdio(sc)){
8096 			/* SGMII */
8097 			mii->mii_readreg = wm_sgmii_readreg;
8098 			mii->mii_writereg = wm_sgmii_writereg;
8099 		} else if (sc->sc_type >= WM_T_80003) {
8100 			/* 80003 */
8101 			mii->mii_readreg = wm_gmii_i80003_readreg;
8102 			mii->mii_writereg = wm_gmii_i80003_writereg;
8103 		} else if (sc->sc_type >= WM_T_I210) {
8104 			/* I210 and I211 */
8105 			mii->mii_readreg = wm_gmii_gs40g_readreg;
8106 			mii->mii_writereg = wm_gmii_gs40g_writereg;
8107 		} else if (sc->sc_type >= WM_T_82580) {
8108 			/* 82580, I350 and I354 */
8109 			sc->sc_phytype = WMPHY_82580;
8110 			mii->mii_readreg = wm_gmii_82580_readreg;
8111 			mii->mii_writereg = wm_gmii_82580_writereg;
8112 		} else if (sc->sc_type >= WM_T_82544) {
8113 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
8114 			mii->mii_readreg = wm_gmii_i82544_readreg;
8115 			mii->mii_writereg = wm_gmii_i82544_writereg;
8116 		} else {
8117 			mii->mii_readreg = wm_gmii_i82543_readreg;
8118 			mii->mii_writereg = wm_gmii_i82543_writereg;
8119 		}
8120 		break;
8121 	}
8122 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8123 		/* All PCH* use _hv_ */
8124 		mii->mii_readreg = wm_gmii_hv_readreg;
8125 		mii->mii_writereg = wm_gmii_hv_writereg;
8126 	}
8127 	mii->mii_statchg = wm_gmii_statchg;
8128 
8129 	wm_gmii_reset(sc);
8130 
8131 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8132 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8133 	    wm_gmii_mediastatus);
8134 
8135 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8136 	    || (sc->sc_type == WM_T_82580)
8137 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8138 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8139 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8140 			/* Attach only one port */
8141 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8142 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8143 		} else {
8144 			int i, id;
8145 			uint32_t ctrl_ext;
8146 
8147 			id = wm_get_phy_id_82575(sc);
8148 			if (id != -1) {
8149 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8150 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8151 			}
8152 			if ((id == -1)
8153 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8154 				/* Power on sgmii phy if it is disabled */
8155 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8156 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8157 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8158 				CSR_WRITE_FLUSH(sc);
8159 				delay(300*1000); /* XXX too long */
8160 
8161 				/* from 1 to 8 */
8162 				for (i = 1; i < 8; i++)
8163 					mii_attach(sc->sc_dev, &sc->sc_mii,
8164 					    0xffffffff, i, MII_OFFSET_ANY,
8165 					    MIIF_DOPAUSE);
8166 
8167 				/* restore previous sfp cage power state */
8168 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8169 			}
8170 		}
8171 	} else {
8172 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8173 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8174 	}
8175 
8176 	/*
8177 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8178 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8179 	 */
8180 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8181 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8182 		wm_set_mdio_slow_mode_hv(sc);
8183 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8184 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8185 	}
8186 
8187 	/*
8188 	 * (For ICH8 variants)
8189 	 * If PHY detection failed, use BM's r/w function and retry.
8190 	 */
8191 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8192 		/* if failed, retry with *_bm_* */
8193 		mii->mii_readreg = wm_gmii_bm_readreg;
8194 		mii->mii_writereg = wm_gmii_bm_writereg;
8195 
8196 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8197 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8198 	}
8199 
8200 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8201 		/* Any PHY wasn't find */
8202 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8203 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8204 		sc->sc_phytype = WMPHY_NONE;
8205 	} else {
8206 		/*
8207 		 * PHY Found!
8208 		 * Check PHY type.
8209 		 */
8210 		uint32_t model;
8211 		struct mii_softc *child;
8212 
8213 		child = LIST_FIRST(&mii->mii_phys);
8214 		model = child->mii_mpd_model;
8215 		if (model == MII_MODEL_yyINTEL_I82566)
8216 			sc->sc_phytype = WMPHY_IGP_3;
8217 
8218 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8219 	}
8220 }
8221 
8222 /*
8223  * wm_gmii_mediachange:	[ifmedia interface function]
8224  *
8225  *	Set hardware to newly-selected media on a 1000BASE-T device.
8226  */
8227 static int
8228 wm_gmii_mediachange(struct ifnet *ifp)
8229 {
8230 	struct wm_softc *sc = ifp->if_softc;
8231 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8232 	int rc;
8233 
8234 	if ((ifp->if_flags & IFF_UP) == 0)
8235 		return 0;
8236 
8237 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8238 	sc->sc_ctrl |= CTRL_SLU;
8239 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8240 	    || (sc->sc_type > WM_T_82543)) {
8241 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8242 	} else {
8243 		sc->sc_ctrl &= ~CTRL_ASDE;
8244 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8245 		if (ife->ifm_media & IFM_FDX)
8246 			sc->sc_ctrl |= CTRL_FD;
8247 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8248 		case IFM_10_T:
8249 			sc->sc_ctrl |= CTRL_SPEED_10;
8250 			break;
8251 		case IFM_100_TX:
8252 			sc->sc_ctrl |= CTRL_SPEED_100;
8253 			break;
8254 		case IFM_1000_T:
8255 			sc->sc_ctrl |= CTRL_SPEED_1000;
8256 			break;
8257 		default:
8258 			panic("wm_gmii_mediachange: bad media 0x%x",
8259 			    ife->ifm_media);
8260 		}
8261 	}
8262 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8263 	if (sc->sc_type <= WM_T_82543)
8264 		wm_gmii_reset(sc);
8265 
8266 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8267 		return 0;
8268 	return rc;
8269 }
8270 
8271 /*
8272  * wm_gmii_mediastatus:	[ifmedia interface function]
8273  *
8274  *	Get the current interface media status on a 1000BASE-T device.
8275  */
8276 static void
8277 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8278 {
8279 	struct wm_softc *sc = ifp->if_softc;
8280 
8281 	ether_mediastatus(ifp, ifmr);
8282 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8283 	    | sc->sc_flowflags;
8284 }
8285 
8286 #define	MDI_IO		CTRL_SWDPIN(2)
8287 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8288 #define	MDI_CLK		CTRL_SWDPIN(3)
8289 
8290 static void
8291 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8292 {
8293 	uint32_t i, v;
8294 
8295 	v = CSR_READ(sc, WMREG_CTRL);
8296 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8297 	v |= MDI_DIR | CTRL_SWDPIO(3);
8298 
8299 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8300 		if (data & i)
8301 			v |= MDI_IO;
8302 		else
8303 			v &= ~MDI_IO;
8304 		CSR_WRITE(sc, WMREG_CTRL, v);
8305 		CSR_WRITE_FLUSH(sc);
8306 		delay(10);
8307 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8308 		CSR_WRITE_FLUSH(sc);
8309 		delay(10);
8310 		CSR_WRITE(sc, WMREG_CTRL, v);
8311 		CSR_WRITE_FLUSH(sc);
8312 		delay(10);
8313 	}
8314 }
8315 
8316 static uint32_t
8317 wm_i82543_mii_recvbits(struct wm_softc *sc)
8318 {
8319 	uint32_t v, i, data = 0;
8320 
8321 	v = CSR_READ(sc, WMREG_CTRL);
8322 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8323 	v |= CTRL_SWDPIO(3);
8324 
8325 	CSR_WRITE(sc, WMREG_CTRL, v);
8326 	CSR_WRITE_FLUSH(sc);
8327 	delay(10);
8328 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8329 	CSR_WRITE_FLUSH(sc);
8330 	delay(10);
8331 	CSR_WRITE(sc, WMREG_CTRL, v);
8332 	CSR_WRITE_FLUSH(sc);
8333 	delay(10);
8334 
8335 	for (i = 0; i < 16; i++) {
8336 		data <<= 1;
8337 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8338 		CSR_WRITE_FLUSH(sc);
8339 		delay(10);
8340 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8341 			data |= 1;
8342 		CSR_WRITE(sc, WMREG_CTRL, v);
8343 		CSR_WRITE_FLUSH(sc);
8344 		delay(10);
8345 	}
8346 
8347 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8348 	CSR_WRITE_FLUSH(sc);
8349 	delay(10);
8350 	CSR_WRITE(sc, WMREG_CTRL, v);
8351 	CSR_WRITE_FLUSH(sc);
8352 	delay(10);
8353 
8354 	return data;
8355 }
8356 
8357 #undef MDI_IO
8358 #undef MDI_DIR
8359 #undef MDI_CLK
8360 
8361 /*
8362  * wm_gmii_i82543_readreg:	[mii interface function]
8363  *
8364  *	Read a PHY register on the GMII (i82543 version).
8365  */
8366 static int
8367 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8368 {
8369 	struct wm_softc *sc = device_private(self);
8370 	int rv;
8371 
8372 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8373 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8374 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8375 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8376 
8377 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8378 	    device_xname(sc->sc_dev), phy, reg, rv));
8379 
8380 	return rv;
8381 }
8382 
8383 /*
8384  * wm_gmii_i82543_writereg:	[mii interface function]
8385  *
8386  *	Write a PHY register on the GMII (i82543 version).
8387  */
8388 static void
8389 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8390 {
8391 	struct wm_softc *sc = device_private(self);
8392 
8393 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8394 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8395 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8396 	    (MII_COMMAND_START << 30), 32);
8397 }
8398 
8399 /*
8400  * wm_gmii_i82544_readreg:	[mii interface function]
8401  *
8402  *	Read a PHY register on the GMII.
8403  */
8404 static int
8405 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8406 {
8407 	struct wm_softc *sc = device_private(self);
8408 	uint32_t mdic = 0;
8409 	int i, rv;
8410 
8411 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8412 	    MDIC_REGADD(reg));
8413 
8414 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8415 		mdic = CSR_READ(sc, WMREG_MDIC);
8416 		if (mdic & MDIC_READY)
8417 			break;
8418 		delay(50);
8419 	}
8420 
8421 	if ((mdic & MDIC_READY) == 0) {
8422 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8423 		    device_xname(sc->sc_dev), phy, reg);
8424 		rv = 0;
8425 	} else if (mdic & MDIC_E) {
8426 #if 0 /* This is normal if no PHY is present. */
8427 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8428 		    device_xname(sc->sc_dev), phy, reg);
8429 #endif
8430 		rv = 0;
8431 	} else {
8432 		rv = MDIC_DATA(mdic);
8433 		if (rv == 0xffff)
8434 			rv = 0;
8435 	}
8436 
8437 	return rv;
8438 }
8439 
8440 /*
8441  * wm_gmii_i82544_writereg:	[mii interface function]
8442  *
8443  *	Write a PHY register on the GMII.
8444  */
8445 static void
8446 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8447 {
8448 	struct wm_softc *sc = device_private(self);
8449 	uint32_t mdic = 0;
8450 	int i;
8451 
8452 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8453 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8454 
8455 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8456 		mdic = CSR_READ(sc, WMREG_MDIC);
8457 		if (mdic & MDIC_READY)
8458 			break;
8459 		delay(50);
8460 	}
8461 
8462 	if ((mdic & MDIC_READY) == 0)
8463 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8464 		    device_xname(sc->sc_dev), phy, reg);
8465 	else if (mdic & MDIC_E)
8466 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8467 		    device_xname(sc->sc_dev), phy, reg);
8468 }
8469 
8470 /*
8471  * wm_gmii_i80003_readreg:	[mii interface function]
8472  *
8473  *	Read a PHY register on the kumeran
8474  * This could be handled by the PHY layer if we didn't have to lock the
8475  * ressource ...
8476  */
8477 static int
8478 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8479 {
8480 	struct wm_softc *sc = device_private(self);
8481 	int sem;
8482 	int rv;
8483 
8484 	if (phy != 1) /* only one PHY on kumeran bus */
8485 		return 0;
8486 
8487 	sem = swfwphysem[sc->sc_funcid];
8488 	if (wm_get_swfw_semaphore(sc, sem)) {
8489 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8490 		    __func__);
8491 		return 0;
8492 	}
8493 
8494 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8495 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8496 		    reg >> GG82563_PAGE_SHIFT);
8497 	} else {
8498 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8499 		    reg >> GG82563_PAGE_SHIFT);
8500 	}
8501 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
8502 	delay(200);
8503 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8504 	delay(200);
8505 
8506 	wm_put_swfw_semaphore(sc, sem);
8507 	return rv;
8508 }
8509 
8510 /*
8511  * wm_gmii_i80003_writereg:	[mii interface function]
8512  *
8513  *	Write a PHY register on the kumeran.
8514  * This could be handled by the PHY layer if we didn't have to lock the
8515  * ressource ...
8516  */
8517 static void
8518 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8519 {
8520 	struct wm_softc *sc = device_private(self);
8521 	int sem;
8522 
8523 	if (phy != 1) /* only one PHY on kumeran bus */
8524 		return;
8525 
8526 	sem = swfwphysem[sc->sc_funcid];
8527 	if (wm_get_swfw_semaphore(sc, sem)) {
8528 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8529 		    __func__);
8530 		return;
8531 	}
8532 
8533 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8534 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8535 		    reg >> GG82563_PAGE_SHIFT);
8536 	} else {
8537 		wm_gmii_i82544_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8538 		    reg >> GG82563_PAGE_SHIFT);
8539 	}
8540 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
8541 	delay(200);
8542 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8543 	delay(200);
8544 
8545 	wm_put_swfw_semaphore(sc, sem);
8546 }
8547 
8548 /*
8549  * wm_gmii_bm_readreg:	[mii interface function]
8550  *
8551  *	Read a PHY register on the kumeran
8552  * This could be handled by the PHY layer if we didn't have to lock the
8553  * ressource ...
8554  */
8555 static int
8556 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8557 {
8558 	struct wm_softc *sc = device_private(self);
8559 	int sem;
8560 	int rv;
8561 
8562 	sem = swfwphysem[sc->sc_funcid];
8563 	if (wm_get_swfw_semaphore(sc, sem)) {
8564 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8565 		    __func__);
8566 		return 0;
8567 	}
8568 
8569 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8570 		if (phy == 1)
8571 			wm_gmii_i82544_writereg(self, phy,
8572 			    MII_IGPHY_PAGE_SELECT, reg);
8573 		else
8574 			wm_gmii_i82544_writereg(self, phy,
8575 			    GG82563_PHY_PAGE_SELECT,
8576 			    reg >> GG82563_PAGE_SHIFT);
8577 	}
8578 
8579 	rv = wm_gmii_i82544_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8580 	wm_put_swfw_semaphore(sc, sem);
8581 	return rv;
8582 }
8583 
8584 /*
8585  * wm_gmii_bm_writereg:	[mii interface function]
8586  *
8587  *	Write a PHY register on the kumeran.
8588  * This could be handled by the PHY layer if we didn't have to lock the
8589  * ressource ...
8590  */
8591 static void
8592 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8593 {
8594 	struct wm_softc *sc = device_private(self);
8595 	int sem;
8596 
8597 	sem = swfwphysem[sc->sc_funcid];
8598 	if (wm_get_swfw_semaphore(sc, sem)) {
8599 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8600 		    __func__);
8601 		return;
8602 	}
8603 
8604 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8605 		if (phy == 1)
8606 			wm_gmii_i82544_writereg(self, phy,
8607 			    MII_IGPHY_PAGE_SELECT, reg);
8608 		else
8609 			wm_gmii_i82544_writereg(self, phy,
8610 			    GG82563_PHY_PAGE_SELECT,
8611 			    reg >> GG82563_PAGE_SHIFT);
8612 	}
8613 
8614 	wm_gmii_i82544_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8615 	wm_put_swfw_semaphore(sc, sem);
8616 }
8617 
8618 static void
8619 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8620 {
8621 	struct wm_softc *sc = device_private(self);
8622 	uint16_t regnum = BM_PHY_REG_NUM(offset);
8623 	uint16_t wuce;
8624 
8625 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
8626 	if (sc->sc_type == WM_T_PCH) {
8627 		/* XXX e1000 driver do nothing... why? */
8628 	}
8629 
8630 	/* Set page 769 */
8631 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8632 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8633 
8634 	wuce = wm_gmii_i82544_readreg(self, 1, BM_WUC_ENABLE_REG);
8635 
8636 	wuce &= ~BM_WUC_HOST_WU_BIT;
8637 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG,
8638 	    wuce | BM_WUC_ENABLE_BIT);
8639 
8640 	/* Select page 800 */
8641 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8642 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8643 
8644 	/* Write page 800 */
8645 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8646 
8647 	if (rd)
8648 		*val = wm_gmii_i82544_readreg(self, 1, BM_WUC_DATA_OPCODE);
8649 	else
8650 		wm_gmii_i82544_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8651 
8652 	/* Set page 769 */
8653 	wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8654 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8655 
8656 	wm_gmii_i82544_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8657 }
8658 
8659 /*
8660  * wm_gmii_hv_readreg:	[mii interface function]
8661  *
8662  *	Read a PHY register on the kumeran
8663  * This could be handled by the PHY layer if we didn't have to lock the
8664  * ressource ...
8665  */
8666 static int
8667 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8668 {
8669 	struct wm_softc *sc = device_private(self);
8670 	uint16_t page = BM_PHY_REG_PAGE(reg);
8671 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8672 	uint16_t val;
8673 	int rv;
8674 
8675 	if (wm_get_swfwhw_semaphore(sc)) {
8676 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8677 		    __func__);
8678 		return 0;
8679 	}
8680 
8681 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8682 	if (sc->sc_phytype == WMPHY_82577) {
8683 		/* XXX must write */
8684 	}
8685 
8686 	/* Page 800 works differently than the rest so it has its own func */
8687 	if (page == BM_WUC_PAGE) {
8688 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8689 		return val;
8690 	}
8691 
8692 	/*
8693 	 * Lower than page 768 works differently than the rest so it has its
8694 	 * own func
8695 	 */
8696 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8697 		printf("gmii_hv_readreg!!!\n");
8698 		return 0;
8699 	}
8700 
8701 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8702 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8703 		    page << BME1000_PAGE_SHIFT);
8704 	}
8705 
8706 	rv = wm_gmii_i82544_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8707 	wm_put_swfwhw_semaphore(sc);
8708 	return rv;
8709 }
8710 
8711 /*
8712  * wm_gmii_hv_writereg:	[mii interface function]
8713  *
8714  *	Write a PHY register on the kumeran.
8715  * This could be handled by the PHY layer if we didn't have to lock the
8716  * ressource ...
8717  */
8718 static void
8719 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8720 {
8721 	struct wm_softc *sc = device_private(self);
8722 	uint16_t page = BM_PHY_REG_PAGE(reg);
8723 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8724 
8725 	if (wm_get_swfwhw_semaphore(sc)) {
8726 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8727 		    __func__);
8728 		return;
8729 	}
8730 
8731 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8732 
8733 	/* Page 800 works differently than the rest so it has its own func */
8734 	if (page == BM_WUC_PAGE) {
8735 		uint16_t tmp;
8736 
8737 		tmp = val;
8738 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
8739 		return;
8740 	}
8741 
8742 	/*
8743 	 * Lower than page 768 works differently than the rest so it has its
8744 	 * own func
8745 	 */
8746 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8747 		printf("gmii_hv_writereg!!!\n");
8748 		return;
8749 	}
8750 
8751 	/*
8752 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
8753 	 * Power Down (whenever bit 11 of the PHY control register is set)
8754 	 */
8755 
8756 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8757 		wm_gmii_i82544_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8758 		    page << BME1000_PAGE_SHIFT);
8759 	}
8760 
8761 	wm_gmii_i82544_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8762 	wm_put_swfwhw_semaphore(sc);
8763 }
8764 
8765 /*
8766  * wm_gmii_82580_readreg:	[mii interface function]
8767  *
8768  *	Read a PHY register on the 82580 and I350.
8769  * This could be handled by the PHY layer if we didn't have to lock the
8770  * ressource ...
8771  */
8772 static int
8773 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8774 {
8775 	struct wm_softc *sc = device_private(self);
8776 	int sem;
8777 	int rv;
8778 
8779 	sem = swfwphysem[sc->sc_funcid];
8780 	if (wm_get_swfw_semaphore(sc, sem)) {
8781 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8782 		    __func__);
8783 		return 0;
8784 	}
8785 
8786 	rv = wm_gmii_i82544_readreg(self, phy, reg);
8787 
8788 	wm_put_swfw_semaphore(sc, sem);
8789 	return rv;
8790 }
8791 
8792 /*
8793  * wm_gmii_82580_writereg:	[mii interface function]
8794  *
8795  *	Write a PHY register on the 82580 and I350.
8796  * This could be handled by the PHY layer if we didn't have to lock the
8797  * ressource ...
8798  */
8799 static void
8800 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8801 {
8802 	struct wm_softc *sc = device_private(self);
8803 	int sem;
8804 
8805 	sem = swfwphysem[sc->sc_funcid];
8806 	if (wm_get_swfw_semaphore(sc, sem)) {
8807 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8808 		    __func__);
8809 		return;
8810 	}
8811 
8812 	wm_gmii_i82544_writereg(self, phy, reg, val);
8813 
8814 	wm_put_swfw_semaphore(sc, sem);
8815 }
8816 
8817 /*
8818  * wm_gmii_gs40g_readreg:	[mii interface function]
8819  *
8820  *	Read a PHY register on the I2100 and I211.
8821  * This could be handled by the PHY layer if we didn't have to lock the
8822  * ressource ...
8823  */
8824 static int
8825 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8826 {
8827 	struct wm_softc *sc = device_private(self);
8828 	int sem;
8829 	int page, offset;
8830 	int rv;
8831 
8832 	/* Acquire semaphore */
8833 	sem = swfwphysem[sc->sc_funcid];
8834 	if (wm_get_swfw_semaphore(sc, sem)) {
8835 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8836 		    __func__);
8837 		return 0;
8838 	}
8839 
8840 	/* Page select */
8841 	page = reg >> GS40G_PAGE_SHIFT;
8842 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8843 
8844 	/* Read reg */
8845 	offset = reg & GS40G_OFFSET_MASK;
8846 	rv = wm_gmii_i82544_readreg(self, phy, offset);
8847 
8848 	wm_put_swfw_semaphore(sc, sem);
8849 	return rv;
8850 }
8851 
8852 /*
8853  * wm_gmii_gs40g_writereg:	[mii interface function]
8854  *
8855  *	Write a PHY register on the I210 and I211.
8856  * This could be handled by the PHY layer if we didn't have to lock the
8857  * ressource ...
8858  */
8859 static void
8860 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8861 {
8862 	struct wm_softc *sc = device_private(self);
8863 	int sem;
8864 	int page, offset;
8865 
8866 	/* Acquire semaphore */
8867 	sem = swfwphysem[sc->sc_funcid];
8868 	if (wm_get_swfw_semaphore(sc, sem)) {
8869 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8870 		    __func__);
8871 		return;
8872 	}
8873 
8874 	/* Page select */
8875 	page = reg >> GS40G_PAGE_SHIFT;
8876 	wm_gmii_i82544_writereg(self, phy, GS40G_PAGE_SELECT, page);
8877 
8878 	/* Write reg */
8879 	offset = reg & GS40G_OFFSET_MASK;
8880 	wm_gmii_i82544_writereg(self, phy, offset, val);
8881 
8882 	/* Release semaphore */
8883 	wm_put_swfw_semaphore(sc, sem);
8884 }
8885 
8886 /*
8887  * wm_gmii_statchg:	[mii interface function]
8888  *
8889  *	Callback from MII layer when media changes.
8890  */
8891 static void
8892 wm_gmii_statchg(struct ifnet *ifp)
8893 {
8894 	struct wm_softc *sc = ifp->if_softc;
8895 	struct mii_data *mii = &sc->sc_mii;
8896 
8897 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
8898 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8899 	sc->sc_fcrtl &= ~FCRTL_XONE;
8900 
8901 	/*
8902 	 * Get flow control negotiation result.
8903 	 */
8904 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
8905 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
8906 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
8907 		mii->mii_media_active &= ~IFM_ETH_FMASK;
8908 	}
8909 
8910 	if (sc->sc_flowflags & IFM_FLOW) {
8911 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
8912 			sc->sc_ctrl |= CTRL_TFCE;
8913 			sc->sc_fcrtl |= FCRTL_XONE;
8914 		}
8915 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
8916 			sc->sc_ctrl |= CTRL_RFCE;
8917 	}
8918 
8919 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
8920 		DPRINTF(WM_DEBUG_LINK,
8921 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
8922 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8923 	} else {
8924 		DPRINTF(WM_DEBUG_LINK,
8925 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
8926 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8927 	}
8928 
8929 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8930 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8931 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
8932 						 : WMREG_FCRTL, sc->sc_fcrtl);
8933 	if (sc->sc_type == WM_T_80003) {
8934 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
8935 		case IFM_1000_T:
8936 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8937 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
8938 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8939 			break;
8940 		default:
8941 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
8942 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
8943 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
8944 			break;
8945 		}
8946 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
8947 	}
8948 }
8949 
8950 /*
8951  * wm_kmrn_readreg:
8952  *
8953  *	Read a kumeran register
8954  */
8955 static int
8956 wm_kmrn_readreg(struct wm_softc *sc, int reg)
8957 {
8958 	int rv;
8959 
8960 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
8961 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
8962 			aprint_error_dev(sc->sc_dev,
8963 			    "%s: failed to get semaphore\n", __func__);
8964 			return 0;
8965 		}
8966 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
8967 		if (wm_get_swfwhw_semaphore(sc)) {
8968 			aprint_error_dev(sc->sc_dev,
8969 			    "%s: failed to get semaphore\n", __func__);
8970 			return 0;
8971 		}
8972 	}
8973 
8974 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
8975 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
8976 	    KUMCTRLSTA_REN);
8977 	CSR_WRITE_FLUSH(sc);
8978 	delay(2);
8979 
8980 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
8981 
8982 	if (sc->sc_flags & WM_F_LOCK_SWFW)
8983 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
8984 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
8985 		wm_put_swfwhw_semaphore(sc);
8986 
8987 	return rv;
8988 }
8989 
8990 /*
8991  * wm_kmrn_writereg:
8992  *
8993  *	Write a kumeran register
8994  */
8995 static void
8996 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
8997 {
8998 
8999 	if (sc->sc_flags & WM_F_LOCK_SWFW) {
9000 		if (wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM)) {
9001 			aprint_error_dev(sc->sc_dev,
9002 			    "%s: failed to get semaphore\n", __func__);
9003 			return;
9004 		}
9005 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
9006 		if (wm_get_swfwhw_semaphore(sc)) {
9007 			aprint_error_dev(sc->sc_dev,
9008 			    "%s: failed to get semaphore\n", __func__);
9009 			return;
9010 		}
9011 	}
9012 
9013 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9014 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9015 	    (val & KUMCTRLSTA_MASK));
9016 
9017 	if (sc->sc_flags & WM_F_LOCK_SWFW)
9018 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9019 	else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
9020 		wm_put_swfwhw_semaphore(sc);
9021 }
9022 
9023 /* SGMII related */
9024 
9025 /*
9026  * wm_sgmii_uses_mdio
9027  *
9028  * Check whether the transaction is to the internal PHY or the external
9029  * MDIO interface. Return true if it's MDIO.
9030  */
9031 static bool
9032 wm_sgmii_uses_mdio(struct wm_softc *sc)
9033 {
9034 	uint32_t reg;
9035 	bool ismdio = false;
9036 
9037 	switch (sc->sc_type) {
9038 	case WM_T_82575:
9039 	case WM_T_82576:
9040 		reg = CSR_READ(sc, WMREG_MDIC);
9041 		ismdio = ((reg & MDIC_DEST) != 0);
9042 		break;
9043 	case WM_T_82580:
9044 	case WM_T_I350:
9045 	case WM_T_I354:
9046 	case WM_T_I210:
9047 	case WM_T_I211:
9048 		reg = CSR_READ(sc, WMREG_MDICNFG);
9049 		ismdio = ((reg & MDICNFG_DEST) != 0);
9050 		break;
9051 	default:
9052 		break;
9053 	}
9054 
9055 	return ismdio;
9056 }
9057 
9058 /*
9059  * wm_sgmii_readreg:	[mii interface function]
9060  *
9061  *	Read a PHY register on the SGMII
9062  * This could be handled by the PHY layer if we didn't have to lock the
9063  * ressource ...
9064  */
9065 static int
9066 wm_sgmii_readreg(device_t self, int phy, int reg)
9067 {
9068 	struct wm_softc *sc = device_private(self);
9069 	uint32_t i2ccmd;
9070 	int i, rv;
9071 
9072 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9073 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9074 		    __func__);
9075 		return 0;
9076 	}
9077 
9078 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9079 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9080 	    | I2CCMD_OPCODE_READ;
9081 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9082 
9083 	/* Poll the ready bit */
9084 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9085 		delay(50);
9086 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9087 		if (i2ccmd & I2CCMD_READY)
9088 			break;
9089 	}
9090 	if ((i2ccmd & I2CCMD_READY) == 0)
9091 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9092 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9093 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9094 
9095 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9096 
9097 	wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
9098 	return rv;
9099 }
9100 
9101 /*
9102  * wm_sgmii_writereg:	[mii interface function]
9103  *
9104  *	Write a PHY register on the SGMII.
9105  * This could be handled by the PHY layer if we didn't have to lock the
9106  * ressource ...
9107  */
9108 static void
9109 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9110 {
9111 	struct wm_softc *sc = device_private(self);
9112 	uint32_t i2ccmd;
9113 	int i;
9114 	int val_swapped;
9115 
9116 	if (wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid])) {
9117 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9118 		    __func__);
9119 		return;
9120 	}
9121 	/* Swap the data bytes for the I2C interface */
9122 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9123 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9124 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9125 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9126 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9127 
9128 	/* Poll the ready bit */
9129 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9130 		delay(50);
9131 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9132 		if (i2ccmd & I2CCMD_READY)
9133 			break;
9134 	}
9135 	if ((i2ccmd & I2CCMD_READY) == 0)
9136 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9137 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9138 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9139 
9140 	wm_put_swfw_semaphore(sc, SWFW_PHY0_SM);
9141 }
9142 
9143 /* TBI related */
9144 
9145 /*
9146  * wm_tbi_mediainit:
9147  *
9148  *	Initialize media for use on 1000BASE-X devices.
9149  */
9150 static void
9151 wm_tbi_mediainit(struct wm_softc *sc)
9152 {
9153 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9154 	const char *sep = "";
9155 
9156 	if (sc->sc_type < WM_T_82543)
9157 		sc->sc_tipg = TIPG_WM_DFLT;
9158 	else
9159 		sc->sc_tipg = TIPG_LG_DFLT;
9160 
9161 	sc->sc_tbi_serdes_anegticks = 5;
9162 
9163 	/* Initialize our media structures */
9164 	sc->sc_mii.mii_ifp = ifp;
9165 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9166 
9167 	if ((sc->sc_type >= WM_T_82575)
9168 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9169 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9170 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9171 	else
9172 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9173 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9174 
9175 	/*
9176 	 * SWD Pins:
9177 	 *
9178 	 *	0 = Link LED (output)
9179 	 *	1 = Loss Of Signal (input)
9180 	 */
9181 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9182 
9183 	/* XXX Perhaps this is only for TBI */
9184 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9185 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9186 
9187 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9188 		sc->sc_ctrl &= ~CTRL_LRST;
9189 
9190 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9191 
9192 #define	ADD(ss, mm, dd)							\
9193 do {									\
9194 	aprint_normal("%s%s", sep, ss);					\
9195 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9196 	sep = ", ";							\
9197 } while (/*CONSTCOND*/0)
9198 
9199 	aprint_normal_dev(sc->sc_dev, "");
9200 
9201 	/* Only 82545 is LX */
9202 	if (sc->sc_type == WM_T_82545) {
9203 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9204 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9205 	} else {
9206 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9207 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9208 	}
9209 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9210 	aprint_normal("\n");
9211 
9212 #undef ADD
9213 
9214 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9215 }
9216 
9217 /*
9218  * wm_tbi_mediachange:	[ifmedia interface function]
9219  *
9220  *	Set hardware to newly-selected media on a 1000BASE-X device.
9221  */
9222 static int
9223 wm_tbi_mediachange(struct ifnet *ifp)
9224 {
9225 	struct wm_softc *sc = ifp->if_softc;
9226 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9227 	uint32_t status;
9228 	int i;
9229 
9230 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9231 		/* XXX need some work for >= 82571 and < 82575 */
9232 		if (sc->sc_type < WM_T_82575)
9233 			return 0;
9234 	}
9235 
9236 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9237 	    || (sc->sc_type >= WM_T_82575))
9238 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9239 
9240 	sc->sc_ctrl &= ~CTRL_LRST;
9241 	sc->sc_txcw = TXCW_ANE;
9242 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9243 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9244 	else if (ife->ifm_media & IFM_FDX)
9245 		sc->sc_txcw |= TXCW_FD;
9246 	else
9247 		sc->sc_txcw |= TXCW_HD;
9248 
9249 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9250 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9251 
9252 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9253 		    device_xname(sc->sc_dev), sc->sc_txcw));
9254 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9255 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9256 	CSR_WRITE_FLUSH(sc);
9257 	delay(1000);
9258 
9259 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9260 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9261 
9262 	/*
9263 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
9264 	 * optics detect a signal, 0 if they don't.
9265 	 */
9266 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9267 		/* Have signal; wait for the link to come up. */
9268 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9269 			delay(10000);
9270 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9271 				break;
9272 		}
9273 
9274 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9275 			    device_xname(sc->sc_dev),i));
9276 
9277 		status = CSR_READ(sc, WMREG_STATUS);
9278 		DPRINTF(WM_DEBUG_LINK,
9279 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9280 			device_xname(sc->sc_dev),status, STATUS_LU));
9281 		if (status & STATUS_LU) {
9282 			/* Link is up. */
9283 			DPRINTF(WM_DEBUG_LINK,
9284 			    ("%s: LINK: set media -> link up %s\n",
9285 			    device_xname(sc->sc_dev),
9286 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9287 
9288 			/*
9289 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9290 			 * so we should update sc->sc_ctrl
9291 			 */
9292 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9293 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9294 			sc->sc_fcrtl &= ~FCRTL_XONE;
9295 			if (status & STATUS_FD)
9296 				sc->sc_tctl |=
9297 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9298 			else
9299 				sc->sc_tctl |=
9300 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9301 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9302 				sc->sc_fcrtl |= FCRTL_XONE;
9303 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9304 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9305 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9306 				      sc->sc_fcrtl);
9307 			sc->sc_tbi_linkup = 1;
9308 		} else {
9309 			if (i == WM_LINKUP_TIMEOUT)
9310 				wm_check_for_link(sc);
9311 			/* Link is down. */
9312 			DPRINTF(WM_DEBUG_LINK,
9313 			    ("%s: LINK: set media -> link down\n",
9314 			    device_xname(sc->sc_dev)));
9315 			sc->sc_tbi_linkup = 0;
9316 		}
9317 	} else {
9318 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9319 		    device_xname(sc->sc_dev)));
9320 		sc->sc_tbi_linkup = 0;
9321 	}
9322 
9323 	wm_tbi_serdes_set_linkled(sc);
9324 
9325 	return 0;
9326 }
9327 
9328 /*
9329  * wm_tbi_mediastatus:	[ifmedia interface function]
9330  *
9331  *	Get the current interface media status on a 1000BASE-X device.
9332  */
9333 static void
9334 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9335 {
9336 	struct wm_softc *sc = ifp->if_softc;
9337 	uint32_t ctrl, status;
9338 
9339 	ifmr->ifm_status = IFM_AVALID;
9340 	ifmr->ifm_active = IFM_ETHER;
9341 
9342 	status = CSR_READ(sc, WMREG_STATUS);
9343 	if ((status & STATUS_LU) == 0) {
9344 		ifmr->ifm_active |= IFM_NONE;
9345 		return;
9346 	}
9347 
9348 	ifmr->ifm_status |= IFM_ACTIVE;
9349 	/* Only 82545 is LX */
9350 	if (sc->sc_type == WM_T_82545)
9351 		ifmr->ifm_active |= IFM_1000_LX;
9352 	else
9353 		ifmr->ifm_active |= IFM_1000_SX;
9354 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9355 		ifmr->ifm_active |= IFM_FDX;
9356 	else
9357 		ifmr->ifm_active |= IFM_HDX;
9358 	ctrl = CSR_READ(sc, WMREG_CTRL);
9359 	if (ctrl & CTRL_RFCE)
9360 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9361 	if (ctrl & CTRL_TFCE)
9362 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9363 }
9364 
9365 /* XXX TBI only */
9366 static int
9367 wm_check_for_link(struct wm_softc *sc)
9368 {
9369 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9370 	uint32_t rxcw;
9371 	uint32_t ctrl;
9372 	uint32_t status;
9373 	uint32_t sig;
9374 
9375 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9376 		/* XXX need some work for >= 82571 */
9377 		if (sc->sc_type >= WM_T_82571) {
9378 			sc->sc_tbi_linkup = 1;
9379 			return 0;
9380 		}
9381 	}
9382 
9383 	rxcw = CSR_READ(sc, WMREG_RXCW);
9384 	ctrl = CSR_READ(sc, WMREG_CTRL);
9385 	status = CSR_READ(sc, WMREG_STATUS);
9386 
9387 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9388 
9389 	DPRINTF(WM_DEBUG_LINK,
9390 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9391 		device_xname(sc->sc_dev), __func__,
9392 		((ctrl & CTRL_SWDPIN(1)) == sig),
9393 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9394 
9395 	/*
9396 	 * SWDPIN   LU RXCW
9397 	 *      0    0    0
9398 	 *      0    0    1	(should not happen)
9399 	 *      0    1    0	(should not happen)
9400 	 *      0    1    1	(should not happen)
9401 	 *      1    0    0	Disable autonego and force linkup
9402 	 *      1    0    1	got /C/ but not linkup yet
9403 	 *      1    1    0	(linkup)
9404 	 *      1    1    1	If IFM_AUTO, back to autonego
9405 	 *
9406 	 */
9407 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9408 	    && ((status & STATUS_LU) == 0)
9409 	    && ((rxcw & RXCW_C) == 0)) {
9410 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9411 			__func__));
9412 		sc->sc_tbi_linkup = 0;
9413 		/* Disable auto-negotiation in the TXCW register */
9414 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9415 
9416 		/*
9417 		 * Force link-up and also force full-duplex.
9418 		 *
9419 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
9420 		 * so we should update sc->sc_ctrl
9421 		 */
9422 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9423 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9424 	} else if (((status & STATUS_LU) != 0)
9425 	    && ((rxcw & RXCW_C) != 0)
9426 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9427 		sc->sc_tbi_linkup = 1;
9428 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9429 			__func__));
9430 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9431 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9432 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9433 	    && ((rxcw & RXCW_C) != 0)) {
9434 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9435 	} else {
9436 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9437 			status));
9438 	}
9439 
9440 	return 0;
9441 }
9442 
9443 /*
9444  * wm_tbi_tick:
9445  *
9446  *	Check the link on TBI devices.
9447  *	This function acts as mii_tick().
9448  */
9449 static void
9450 wm_tbi_tick(struct wm_softc *sc)
9451 {
9452 	struct mii_data *mii = &sc->sc_mii;
9453 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9454 	uint32_t status;
9455 
9456 	KASSERT(WM_CORE_LOCKED(sc));
9457 
9458 	status = CSR_READ(sc, WMREG_STATUS);
9459 
9460 	/* XXX is this needed? */
9461 	(void)CSR_READ(sc, WMREG_RXCW);
9462 	(void)CSR_READ(sc, WMREG_CTRL);
9463 
9464 	/* set link status */
9465 	if ((status & STATUS_LU) == 0) {
9466 		DPRINTF(WM_DEBUG_LINK,
9467 		    ("%s: LINK: checklink -> down\n",
9468 			device_xname(sc->sc_dev)));
9469 		sc->sc_tbi_linkup = 0;
9470 	} else if (sc->sc_tbi_linkup == 0) {
9471 		DPRINTF(WM_DEBUG_LINK,
9472 		    ("%s: LINK: checklink -> up %s\n",
9473 			device_xname(sc->sc_dev),
9474 			(status & STATUS_FD) ? "FDX" : "HDX"));
9475 		sc->sc_tbi_linkup = 1;
9476 		sc->sc_tbi_serdes_ticks = 0;
9477 	}
9478 
9479 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9480 		goto setled;
9481 
9482 	if ((status & STATUS_LU) == 0) {
9483 		sc->sc_tbi_linkup = 0;
9484 		/* If the timer expired, retry autonegotiation */
9485 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9486 		    && (++sc->sc_tbi_serdes_ticks
9487 			>= sc->sc_tbi_serdes_anegticks)) {
9488 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9489 			sc->sc_tbi_serdes_ticks = 0;
9490 			/*
9491 			 * Reset the link, and let autonegotiation do
9492 			 * its thing
9493 			 */
9494 			sc->sc_ctrl |= CTRL_LRST;
9495 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9496 			CSR_WRITE_FLUSH(sc);
9497 			delay(1000);
9498 			sc->sc_ctrl &= ~CTRL_LRST;
9499 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9500 			CSR_WRITE_FLUSH(sc);
9501 			delay(1000);
9502 			CSR_WRITE(sc, WMREG_TXCW,
9503 			    sc->sc_txcw & ~TXCW_ANE);
9504 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9505 		}
9506 	}
9507 
9508 setled:
9509 	wm_tbi_serdes_set_linkled(sc);
9510 }
9511 
9512 /* SERDES related */
9513 static void
9514 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9515 {
9516 	uint32_t reg;
9517 
9518 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9519 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
9520 		return;
9521 
9522 	reg = CSR_READ(sc, WMREG_PCS_CFG);
9523 	reg |= PCS_CFG_PCS_EN;
9524 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9525 
9526 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
9527 	reg &= ~CTRL_EXT_SWDPIN(3);
9528 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9529 	CSR_WRITE_FLUSH(sc);
9530 }
9531 
9532 static int
9533 wm_serdes_mediachange(struct ifnet *ifp)
9534 {
9535 	struct wm_softc *sc = ifp->if_softc;
9536 	bool pcs_autoneg = true; /* XXX */
9537 	uint32_t ctrl_ext, pcs_lctl, reg;
9538 
9539 	/* XXX Currently, this function is not called on 8257[12] */
9540 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9541 	    || (sc->sc_type >= WM_T_82575))
9542 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9543 
9544 	wm_serdes_power_up_link_82575(sc);
9545 
9546 	sc->sc_ctrl |= CTRL_SLU;
9547 
9548 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9549 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9550 
9551 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9552 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
9553 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9554 	case CTRL_EXT_LINK_MODE_SGMII:
9555 		pcs_autoneg = true;
9556 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9557 		break;
9558 	case CTRL_EXT_LINK_MODE_1000KX:
9559 		pcs_autoneg = false;
9560 		/* FALLTHROUGH */
9561 	default:
9562 		if ((sc->sc_type == WM_T_82575)
9563 		    || (sc->sc_type == WM_T_82576)) {
9564 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9565 				pcs_autoneg = false;
9566 		}
9567 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9568 		    | CTRL_FRCFDX;
9569 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9570 	}
9571 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9572 
9573 	if (pcs_autoneg) {
9574 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9575 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9576 
9577 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
9578 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9579 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9580 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9581 	} else
9582 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9583 
9584 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9585 
9586 
9587 	return 0;
9588 }
9589 
9590 static void
9591 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9592 {
9593 	struct wm_softc *sc = ifp->if_softc;
9594 	struct mii_data *mii = &sc->sc_mii;
9595 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9596 	uint32_t pcs_adv, pcs_lpab, reg;
9597 
9598 	ifmr->ifm_status = IFM_AVALID;
9599 	ifmr->ifm_active = IFM_ETHER;
9600 
9601 	/* Check PCS */
9602 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9603 	if ((reg & PCS_LSTS_LINKOK) == 0) {
9604 		ifmr->ifm_active |= IFM_NONE;
9605 		sc->sc_tbi_linkup = 0;
9606 		goto setled;
9607 	}
9608 
9609 	sc->sc_tbi_linkup = 1;
9610 	ifmr->ifm_status |= IFM_ACTIVE;
9611 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9612 	if ((reg & PCS_LSTS_FDX) != 0)
9613 		ifmr->ifm_active |= IFM_FDX;
9614 	else
9615 		ifmr->ifm_active |= IFM_HDX;
9616 	mii->mii_media_active &= ~IFM_ETH_FMASK;
9617 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9618 		/* Check flow */
9619 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9620 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
9621 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9622 			goto setled;
9623 		}
9624 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9625 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9626 		DPRINTF(WM_DEBUG_LINK,
9627 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9628 		if ((pcs_adv & TXCW_SYM_PAUSE)
9629 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9630 			mii->mii_media_active |= IFM_FLOW
9631 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9632 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9633 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9634 		    && (pcs_lpab & TXCW_SYM_PAUSE)
9635 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9636 			mii->mii_media_active |= IFM_FLOW
9637 			    | IFM_ETH_TXPAUSE;
9638 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
9639 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9640 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9641 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9642 			mii->mii_media_active |= IFM_FLOW
9643 			    | IFM_ETH_RXPAUSE;
9644 		} else {
9645 		}
9646 	}
9647 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9648 	    | (mii->mii_media_active & IFM_ETH_FMASK);
9649 setled:
9650 	wm_tbi_serdes_set_linkled(sc);
9651 }
9652 
9653 /*
9654  * wm_serdes_tick:
9655  *
9656  *	Check the link on serdes devices.
9657  */
9658 static void
9659 wm_serdes_tick(struct wm_softc *sc)
9660 {
9661 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9662 	struct mii_data *mii = &sc->sc_mii;
9663 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9664 	uint32_t reg;
9665 
9666 	KASSERT(WM_CORE_LOCKED(sc));
9667 
9668 	mii->mii_media_status = IFM_AVALID;
9669 	mii->mii_media_active = IFM_ETHER;
9670 
9671 	/* Check PCS */
9672 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9673 	if ((reg & PCS_LSTS_LINKOK) != 0) {
9674 		mii->mii_media_status |= IFM_ACTIVE;
9675 		sc->sc_tbi_linkup = 1;
9676 		sc->sc_tbi_serdes_ticks = 0;
9677 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
9678 		if ((reg & PCS_LSTS_FDX) != 0)
9679 			mii->mii_media_active |= IFM_FDX;
9680 		else
9681 			mii->mii_media_active |= IFM_HDX;
9682 	} else {
9683 		mii->mii_media_status |= IFM_NONE;
9684 		sc->sc_tbi_linkup = 0;
9685 		    /* If the timer expired, retry autonegotiation */
9686 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9687 		    && (++sc->sc_tbi_serdes_ticks
9688 			>= sc->sc_tbi_serdes_anegticks)) {
9689 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9690 			sc->sc_tbi_serdes_ticks = 0;
9691 			/* XXX */
9692 			wm_serdes_mediachange(ifp);
9693 		}
9694 	}
9695 
9696 	wm_tbi_serdes_set_linkled(sc);
9697 }
9698 
9699 /* SFP related */
9700 
9701 static int
9702 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9703 {
9704 	uint32_t i2ccmd;
9705 	int i;
9706 
9707 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9708 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9709 
9710 	/* Poll the ready bit */
9711 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9712 		delay(50);
9713 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9714 		if (i2ccmd & I2CCMD_READY)
9715 			break;
9716 	}
9717 	if ((i2ccmd & I2CCMD_READY) == 0)
9718 		return -1;
9719 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9720 		return -1;
9721 
9722 	*data = i2ccmd & 0x00ff;
9723 
9724 	return 0;
9725 }
9726 
9727 static uint32_t
9728 wm_sfp_get_media_type(struct wm_softc *sc)
9729 {
9730 	uint32_t ctrl_ext;
9731 	uint8_t val = 0;
9732 	int timeout = 3;
9733 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9734 	int rv = -1;
9735 
9736 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9737 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9738 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9739 	CSR_WRITE_FLUSH(sc);
9740 
9741 	/* Read SFP module data */
9742 	while (timeout) {
9743 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9744 		if (rv == 0)
9745 			break;
9746 		delay(100*1000); /* XXX too big */
9747 		timeout--;
9748 	}
9749 	if (rv != 0)
9750 		goto out;
9751 	switch (val) {
9752 	case SFF_SFP_ID_SFF:
9753 		aprint_normal_dev(sc->sc_dev,
9754 		    "Module/Connector soldered to board\n");
9755 		break;
9756 	case SFF_SFP_ID_SFP:
9757 		aprint_normal_dev(sc->sc_dev, "SFP\n");
9758 		break;
9759 	case SFF_SFP_ID_UNKNOWN:
9760 		goto out;
9761 	default:
9762 		break;
9763 	}
9764 
9765 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9766 	if (rv != 0) {
9767 		goto out;
9768 	}
9769 
9770 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9771 		mediatype = WM_MEDIATYPE_SERDES;
9772 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
9773 		sc->sc_flags |= WM_F_SGMII;
9774 		mediatype = WM_MEDIATYPE_COPPER;
9775 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
9776 		sc->sc_flags |= WM_F_SGMII;
9777 		mediatype = WM_MEDIATYPE_SERDES;
9778 	}
9779 
9780 out:
9781 	/* Restore I2C interface setting */
9782 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9783 
9784 	return mediatype;
9785 }
9786 /*
9787  * NVM related.
9788  * Microwire, SPI (w/wo EERD) and Flash.
9789  */
9790 
9791 /* Both spi and uwire */
9792 
9793 /*
9794  * wm_eeprom_sendbits:
9795  *
9796  *	Send a series of bits to the EEPROM.
9797  */
9798 static void
9799 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9800 {
9801 	uint32_t reg;
9802 	int x;
9803 
9804 	reg = CSR_READ(sc, WMREG_EECD);
9805 
9806 	for (x = nbits; x > 0; x--) {
9807 		if (bits & (1U << (x - 1)))
9808 			reg |= EECD_DI;
9809 		else
9810 			reg &= ~EECD_DI;
9811 		CSR_WRITE(sc, WMREG_EECD, reg);
9812 		CSR_WRITE_FLUSH(sc);
9813 		delay(2);
9814 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9815 		CSR_WRITE_FLUSH(sc);
9816 		delay(2);
9817 		CSR_WRITE(sc, WMREG_EECD, reg);
9818 		CSR_WRITE_FLUSH(sc);
9819 		delay(2);
9820 	}
9821 }
9822 
9823 /*
9824  * wm_eeprom_recvbits:
9825  *
9826  *	Receive a series of bits from the EEPROM.
9827  */
9828 static void
9829 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9830 {
9831 	uint32_t reg, val;
9832 	int x;
9833 
9834 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9835 
9836 	val = 0;
9837 	for (x = nbits; x > 0; x--) {
9838 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9839 		CSR_WRITE_FLUSH(sc);
9840 		delay(2);
9841 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9842 			val |= (1U << (x - 1));
9843 		CSR_WRITE(sc, WMREG_EECD, reg);
9844 		CSR_WRITE_FLUSH(sc);
9845 		delay(2);
9846 	}
9847 	*valp = val;
9848 }
9849 
9850 /* Microwire */
9851 
9852 /*
9853  * wm_nvm_read_uwire:
9854  *
9855  *	Read a word from the EEPROM using the MicroWire protocol.
9856  */
9857 static int
9858 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9859 {
9860 	uint32_t reg, val;
9861 	int i;
9862 
9863 	for (i = 0; i < wordcnt; i++) {
9864 		/* Clear SK and DI. */
9865 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
9866 		CSR_WRITE(sc, WMREG_EECD, reg);
9867 
9868 		/*
9869 		 * XXX: workaround for a bug in qemu-0.12.x and prior
9870 		 * and Xen.
9871 		 *
9872 		 * We use this workaround only for 82540 because qemu's
9873 		 * e1000 act as 82540.
9874 		 */
9875 		if (sc->sc_type == WM_T_82540) {
9876 			reg |= EECD_SK;
9877 			CSR_WRITE(sc, WMREG_EECD, reg);
9878 			reg &= ~EECD_SK;
9879 			CSR_WRITE(sc, WMREG_EECD, reg);
9880 			CSR_WRITE_FLUSH(sc);
9881 			delay(2);
9882 		}
9883 		/* XXX: end of workaround */
9884 
9885 		/* Set CHIP SELECT. */
9886 		reg |= EECD_CS;
9887 		CSR_WRITE(sc, WMREG_EECD, reg);
9888 		CSR_WRITE_FLUSH(sc);
9889 		delay(2);
9890 
9891 		/* Shift in the READ command. */
9892 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
9893 
9894 		/* Shift in address. */
9895 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
9896 
9897 		/* Shift out the data. */
9898 		wm_eeprom_recvbits(sc, &val, 16);
9899 		data[i] = val & 0xffff;
9900 
9901 		/* Clear CHIP SELECT. */
9902 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
9903 		CSR_WRITE(sc, WMREG_EECD, reg);
9904 		CSR_WRITE_FLUSH(sc);
9905 		delay(2);
9906 	}
9907 
9908 	return 0;
9909 }
9910 
9911 /* SPI */
9912 
9913 /*
9914  * Set SPI and FLASH related information from the EECD register.
9915  * For 82541 and 82547, the word size is taken from EEPROM.
9916  */
9917 static int
9918 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
9919 {
9920 	int size;
9921 	uint32_t reg;
9922 	uint16_t data;
9923 
9924 	reg = CSR_READ(sc, WMREG_EECD);
9925 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
9926 
9927 	/* Read the size of NVM from EECD by default */
9928 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9929 	switch (sc->sc_type) {
9930 	case WM_T_82541:
9931 	case WM_T_82541_2:
9932 	case WM_T_82547:
9933 	case WM_T_82547_2:
9934 		/* Set dummy value to access EEPROM */
9935 		sc->sc_nvm_wordsize = 64;
9936 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
9937 		reg = data;
9938 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
9939 		if (size == 0)
9940 			size = 6; /* 64 word size */
9941 		else
9942 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
9943 		break;
9944 	case WM_T_80003:
9945 	case WM_T_82571:
9946 	case WM_T_82572:
9947 	case WM_T_82573: /* SPI case */
9948 	case WM_T_82574: /* SPI case */
9949 	case WM_T_82583: /* SPI case */
9950 		size += NVM_WORD_SIZE_BASE_SHIFT;
9951 		if (size > 14)
9952 			size = 14;
9953 		break;
9954 	case WM_T_82575:
9955 	case WM_T_82576:
9956 	case WM_T_82580:
9957 	case WM_T_I350:
9958 	case WM_T_I354:
9959 	case WM_T_I210:
9960 	case WM_T_I211:
9961 		size += NVM_WORD_SIZE_BASE_SHIFT;
9962 		if (size > 15)
9963 			size = 15;
9964 		break;
9965 	default:
9966 		aprint_error_dev(sc->sc_dev,
9967 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
9968 		return -1;
9969 		break;
9970 	}
9971 
9972 	sc->sc_nvm_wordsize = 1 << size;
9973 
9974 	return 0;
9975 }
9976 
9977 /*
9978  * wm_nvm_ready_spi:
9979  *
9980  *	Wait for a SPI EEPROM to be ready for commands.
9981  */
9982 static int
9983 wm_nvm_ready_spi(struct wm_softc *sc)
9984 {
9985 	uint32_t val;
9986 	int usec;
9987 
9988 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
9989 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
9990 		wm_eeprom_recvbits(sc, &val, 8);
9991 		if ((val & SPI_SR_RDY) == 0)
9992 			break;
9993 	}
9994 	if (usec >= SPI_MAX_RETRIES) {
9995 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
9996 		return 1;
9997 	}
9998 	return 0;
9999 }
10000 
10001 /*
10002  * wm_nvm_read_spi:
10003  *
10004  *	Read a work from the EEPROM using the SPI protocol.
10005  */
10006 static int
10007 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10008 {
10009 	uint32_t reg, val;
10010 	int i;
10011 	uint8_t opc;
10012 
10013 	/* Clear SK and CS. */
10014 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10015 	CSR_WRITE(sc, WMREG_EECD, reg);
10016 	CSR_WRITE_FLUSH(sc);
10017 	delay(2);
10018 
10019 	if (wm_nvm_ready_spi(sc))
10020 		return 1;
10021 
10022 	/* Toggle CS to flush commands. */
10023 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10024 	CSR_WRITE_FLUSH(sc);
10025 	delay(2);
10026 	CSR_WRITE(sc, WMREG_EECD, reg);
10027 	CSR_WRITE_FLUSH(sc);
10028 	delay(2);
10029 
10030 	opc = SPI_OPC_READ;
10031 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
10032 		opc |= SPI_OPC_A8;
10033 
10034 	wm_eeprom_sendbits(sc, opc, 8);
10035 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10036 
10037 	for (i = 0; i < wordcnt; i++) {
10038 		wm_eeprom_recvbits(sc, &val, 16);
10039 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10040 	}
10041 
10042 	/* Raise CS and clear SK. */
10043 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10044 	CSR_WRITE(sc, WMREG_EECD, reg);
10045 	CSR_WRITE_FLUSH(sc);
10046 	delay(2);
10047 
10048 	return 0;
10049 }
10050 
10051 /* Using with EERD */
10052 
10053 static int
10054 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10055 {
10056 	uint32_t attempts = 100000;
10057 	uint32_t i, reg = 0;
10058 	int32_t done = -1;
10059 
10060 	for (i = 0; i < attempts; i++) {
10061 		reg = CSR_READ(sc, rw);
10062 
10063 		if (reg & EERD_DONE) {
10064 			done = 0;
10065 			break;
10066 		}
10067 		delay(5);
10068 	}
10069 
10070 	return done;
10071 }
10072 
10073 static int
10074 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10075     uint16_t *data)
10076 {
10077 	int i, eerd = 0;
10078 	int error = 0;
10079 
10080 	for (i = 0; i < wordcnt; i++) {
10081 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10082 
10083 		CSR_WRITE(sc, WMREG_EERD, eerd);
10084 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10085 		if (error != 0)
10086 			break;
10087 
10088 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10089 	}
10090 
10091 	return error;
10092 }
10093 
10094 /* Flash */
10095 
10096 static int
10097 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10098 {
10099 	uint32_t eecd;
10100 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10101 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10102 	uint8_t sig_byte = 0;
10103 
10104 	switch (sc->sc_type) {
10105 	case WM_T_PCH_SPT:
10106 		/*
10107 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
10108 		 * sector valid bits from the NVM.
10109 		 */
10110 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10111 		if ((*bank == 0) || (*bank == 1)) {
10112 			aprint_error_dev(sc->sc_dev,
10113 					 "%s: no valid NVM bank present\n",
10114 				__func__);
10115 			return -1;
10116 		} else {
10117 			*bank = *bank - 2;
10118 			return 0;
10119 		}
10120 	case WM_T_ICH8:
10121 	case WM_T_ICH9:
10122 		eecd = CSR_READ(sc, WMREG_EECD);
10123 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10124 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10125 			return 0;
10126 		}
10127 		/* FALLTHROUGH */
10128 	default:
10129 		/* Default to 0 */
10130 		*bank = 0;
10131 
10132 		/* Check bank 0 */
10133 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10134 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10135 			*bank = 0;
10136 			return 0;
10137 		}
10138 
10139 		/* Check bank 1 */
10140 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10141 		    &sig_byte);
10142 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10143 			*bank = 1;
10144 			return 0;
10145 		}
10146 	}
10147 
10148 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10149 		device_xname(sc->sc_dev)));
10150 	return -1;
10151 }
10152 
10153 /******************************************************************************
10154  * This function does initial flash setup so that a new read/write/erase cycle
10155  * can be started.
10156  *
10157  * sc - The pointer to the hw structure
10158  ****************************************************************************/
10159 static int32_t
10160 wm_ich8_cycle_init(struct wm_softc *sc)
10161 {
10162 	uint16_t hsfsts;
10163 	int32_t error = 1;
10164 	int32_t i     = 0;
10165 
10166 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10167 
10168 	/* May be check the Flash Des Valid bit in Hw status */
10169 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10170 		return error;
10171 	}
10172 
10173 	/* Clear FCERR in Hw status by writing 1 */
10174 	/* Clear DAEL in Hw status by writing a 1 */
10175 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10176 
10177 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10178 
10179 	/*
10180 	 * Either we should have a hardware SPI cycle in progress bit to check
10181 	 * against, in order to start a new cycle or FDONE bit should be
10182 	 * changed in the hardware so that it is 1 after harware reset, which
10183 	 * can then be used as an indication whether a cycle is in progress or
10184 	 * has been completed .. we should also have some software semaphore
10185 	 * mechanism to guard FDONE or the cycle in progress bit so that two
10186 	 * threads access to those bits can be sequentiallized or a way so that
10187 	 * 2 threads dont start the cycle at the same time
10188 	 */
10189 
10190 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10191 		/*
10192 		 * There is no cycle running at present, so we can start a
10193 		 * cycle
10194 		 */
10195 
10196 		/* Begin by setting Flash Cycle Done. */
10197 		hsfsts |= HSFSTS_DONE;
10198 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10199 		error = 0;
10200 	} else {
10201 		/*
10202 		 * otherwise poll for sometime so the current cycle has a
10203 		 * chance to end before giving up.
10204 		 */
10205 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10206 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10207 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10208 				error = 0;
10209 				break;
10210 			}
10211 			delay(1);
10212 		}
10213 		if (error == 0) {
10214 			/*
10215 			 * Successful in waiting for previous cycle to timeout,
10216 			 * now set the Flash Cycle Done.
10217 			 */
10218 			hsfsts |= HSFSTS_DONE;
10219 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10220 		}
10221 	}
10222 	return error;
10223 }
10224 
10225 /******************************************************************************
10226  * This function starts a flash cycle and waits for its completion
10227  *
10228  * sc - The pointer to the hw structure
10229  ****************************************************************************/
10230 static int32_t
10231 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10232 {
10233 	uint16_t hsflctl;
10234 	uint16_t hsfsts;
10235 	int32_t error = 1;
10236 	uint32_t i = 0;
10237 
10238 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10239 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10240 	hsflctl |= HSFCTL_GO;
10241 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10242 
10243 	/* Wait till FDONE bit is set to 1 */
10244 	do {
10245 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10246 		if (hsfsts & HSFSTS_DONE)
10247 			break;
10248 		delay(1);
10249 		i++;
10250 	} while (i < timeout);
10251 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
10252 		error = 0;
10253 
10254 	return error;
10255 }
10256 
10257 /******************************************************************************
10258  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10259  *
10260  * sc - The pointer to the hw structure
10261  * index - The index of the byte or word to read.
10262  * size - Size of data to read, 1=byte 2=word, 4=dword
10263  * data - Pointer to the word to store the value read.
10264  *****************************************************************************/
10265 static int32_t
10266 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10267     uint32_t size, uint32_t *data)
10268 {
10269 	uint16_t hsfsts;
10270 	uint16_t hsflctl;
10271 	uint32_t flash_linear_address;
10272 	uint32_t flash_data = 0;
10273 	int32_t error = 1;
10274 	int32_t count = 0;
10275 
10276 	if (size < 1  || size > 4 || data == 0x0 ||
10277 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10278 		return error;
10279 
10280 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10281 	    sc->sc_ich8_flash_base;
10282 
10283 	do {
10284 		delay(1);
10285 		/* Steps */
10286 		error = wm_ich8_cycle_init(sc);
10287 		if (error)
10288 			break;
10289 
10290 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10291 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10292 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10293 		    & HSFCTL_BCOUNT_MASK;
10294 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10295 		if (sc->sc_type == WM_T_PCH_SPT) {
10296 			/*
10297 			 * In SPT, This register is in Lan memory space, not
10298 			 * flash. Therefore, only 32 bit access is supported.
10299 			 */
10300 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10301 			    (uint32_t)hsflctl);
10302 		} else
10303 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10304 
10305 		/*
10306 		 * Write the last 24 bits of index into Flash Linear address
10307 		 * field in Flash Address
10308 		 */
10309 		/* TODO: TBD maybe check the index against the size of flash */
10310 
10311 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10312 
10313 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10314 
10315 		/*
10316 		 * Check if FCERR is set to 1, if set to 1, clear it and try
10317 		 * the whole sequence a few more times, else read in (shift in)
10318 		 * the Flash Data0, the order is least significant byte first
10319 		 * msb to lsb
10320 		 */
10321 		if (error == 0) {
10322 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10323 			if (size == 1)
10324 				*data = (uint8_t)(flash_data & 0x000000FF);
10325 			else if (size == 2)
10326 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10327 			else if (size == 4)
10328 				*data = (uint32_t)flash_data;
10329 			break;
10330 		} else {
10331 			/*
10332 			 * If we've gotten here, then things are probably
10333 			 * completely hosed, but if the error condition is
10334 			 * detected, it won't hurt to give it another try...
10335 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10336 			 */
10337 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10338 			if (hsfsts & HSFSTS_ERR) {
10339 				/* Repeat for some time before giving up. */
10340 				continue;
10341 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10342 				break;
10343 		}
10344 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10345 
10346 	return error;
10347 }
10348 
10349 /******************************************************************************
10350  * Reads a single byte from the NVM using the ICH8 flash access registers.
10351  *
10352  * sc - pointer to wm_hw structure
10353  * index - The index of the byte to read.
10354  * data - Pointer to a byte to store the value read.
10355  *****************************************************************************/
10356 static int32_t
10357 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10358 {
10359 	int32_t status;
10360 	uint32_t word = 0;
10361 
10362 	status = wm_read_ich8_data(sc, index, 1, &word);
10363 	if (status == 0)
10364 		*data = (uint8_t)word;
10365 	else
10366 		*data = 0;
10367 
10368 	return status;
10369 }
10370 
10371 /******************************************************************************
10372  * Reads a word from the NVM using the ICH8 flash access registers.
10373  *
10374  * sc - pointer to wm_hw structure
10375  * index - The starting byte index of the word to read.
10376  * data - Pointer to a word to store the value read.
10377  *****************************************************************************/
10378 static int32_t
10379 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10380 {
10381 	int32_t status;
10382 	uint32_t word = 0;
10383 
10384 	status = wm_read_ich8_data(sc, index, 2, &word);
10385 	if (status == 0)
10386 		*data = (uint16_t)word;
10387 	else
10388 		*data = 0;
10389 
10390 	return status;
10391 }
10392 
10393 /******************************************************************************
10394  * Reads a dword from the NVM using the ICH8 flash access registers.
10395  *
10396  * sc - pointer to wm_hw structure
10397  * index - The starting byte index of the word to read.
10398  * data - Pointer to a word to store the value read.
10399  *****************************************************************************/
10400 static int32_t
10401 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10402 {
10403 	int32_t status;
10404 
10405 	status = wm_read_ich8_data(sc, index, 4, data);
10406 	return status;
10407 }
10408 
10409 /******************************************************************************
10410  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10411  * register.
10412  *
10413  * sc - Struct containing variables accessed by shared code
10414  * offset - offset of word in the EEPROM to read
10415  * data - word read from the EEPROM
10416  * words - number of words to read
10417  *****************************************************************************/
10418 static int
10419 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10420 {
10421 	int32_t  error = 0;
10422 	uint32_t flash_bank = 0;
10423 	uint32_t act_offset = 0;
10424 	uint32_t bank_offset = 0;
10425 	uint16_t word = 0;
10426 	uint16_t i = 0;
10427 
10428 	/*
10429 	 * We need to know which is the valid flash bank.  In the event
10430 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10431 	 * managing flash_bank.  So it cannot be trusted and needs
10432 	 * to be updated with each read.
10433 	 */
10434 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10435 	if (error) {
10436 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10437 			device_xname(sc->sc_dev)));
10438 		flash_bank = 0;
10439 	}
10440 
10441 	/*
10442 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10443 	 * size
10444 	 */
10445 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10446 
10447 	error = wm_get_swfwhw_semaphore(sc);
10448 	if (error) {
10449 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10450 		    __func__);
10451 		return error;
10452 	}
10453 
10454 	for (i = 0; i < words; i++) {
10455 		/* The NVM part needs a byte offset, hence * 2 */
10456 		act_offset = bank_offset + ((offset + i) * 2);
10457 		error = wm_read_ich8_word(sc, act_offset, &word);
10458 		if (error) {
10459 			aprint_error_dev(sc->sc_dev,
10460 			    "%s: failed to read NVM\n", __func__);
10461 			break;
10462 		}
10463 		data[i] = word;
10464 	}
10465 
10466 	wm_put_swfwhw_semaphore(sc);
10467 	return error;
10468 }
10469 
10470 /******************************************************************************
10471  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10472  * register.
10473  *
10474  * sc - Struct containing variables accessed by shared code
10475  * offset - offset of word in the EEPROM to read
10476  * data - word read from the EEPROM
10477  * words - number of words to read
10478  *****************************************************************************/
10479 static int
10480 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10481 {
10482 	int32_t  error = 0;
10483 	uint32_t flash_bank = 0;
10484 	uint32_t act_offset = 0;
10485 	uint32_t bank_offset = 0;
10486 	uint32_t dword = 0;
10487 	uint16_t i = 0;
10488 
10489 	/*
10490 	 * We need to know which is the valid flash bank.  In the event
10491 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10492 	 * managing flash_bank.  So it cannot be trusted and needs
10493 	 * to be updated with each read.
10494 	 */
10495 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10496 	if (error) {
10497 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10498 			device_xname(sc->sc_dev)));
10499 		flash_bank = 0;
10500 	}
10501 
10502 	/*
10503 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10504 	 * size
10505 	 */
10506 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10507 
10508 	error = wm_get_swfwhw_semaphore(sc);
10509 	if (error) {
10510 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10511 		    __func__);
10512 		return error;
10513 	}
10514 
10515 	for (i = 0; i < words; i++) {
10516 		/* The NVM part needs a byte offset, hence * 2 */
10517 		act_offset = bank_offset + ((offset + i) * 2);
10518 		/* but we must read dword aligned, so mask ... */
10519 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10520 		if (error) {
10521 			aprint_error_dev(sc->sc_dev,
10522 			    "%s: failed to read NVM\n", __func__);
10523 			break;
10524 		}
10525 		/* ... and pick out low or high word */
10526 		if ((act_offset & 0x2) == 0)
10527 			data[i] = (uint16_t)(dword & 0xFFFF);
10528 		else
10529 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10530 	}
10531 
10532 	wm_put_swfwhw_semaphore(sc);
10533 	return error;
10534 }
10535 
10536 /* iNVM */
10537 
10538 static int
10539 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10540 {
10541 	int32_t  rv = 0;
10542 	uint32_t invm_dword;
10543 	uint16_t i;
10544 	uint8_t record_type, word_address;
10545 
10546 	for (i = 0; i < INVM_SIZE; i++) {
10547 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10548 		/* Get record type */
10549 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10550 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10551 			break;
10552 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10553 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10554 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10555 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10556 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10557 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10558 			if (word_address == address) {
10559 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10560 				rv = 0;
10561 				break;
10562 			}
10563 		}
10564 	}
10565 
10566 	return rv;
10567 }
10568 
10569 static int
10570 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10571 {
10572 	int rv = 0;
10573 	int i;
10574 
10575 	for (i = 0; i < words; i++) {
10576 		switch (offset + i) {
10577 		case NVM_OFF_MACADDR:
10578 		case NVM_OFF_MACADDR1:
10579 		case NVM_OFF_MACADDR2:
10580 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10581 			if (rv != 0) {
10582 				data[i] = 0xffff;
10583 				rv = -1;
10584 			}
10585 			break;
10586 		case NVM_OFF_CFG2:
10587 			rv = wm_nvm_read_word_invm(sc, offset, data);
10588 			if (rv != 0) {
10589 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
10590 				rv = 0;
10591 			}
10592 			break;
10593 		case NVM_OFF_CFG4:
10594 			rv = wm_nvm_read_word_invm(sc, offset, data);
10595 			if (rv != 0) {
10596 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
10597 				rv = 0;
10598 			}
10599 			break;
10600 		case NVM_OFF_LED_1_CFG:
10601 			rv = wm_nvm_read_word_invm(sc, offset, data);
10602 			if (rv != 0) {
10603 				*data = NVM_LED_1_CFG_DEFAULT_I211;
10604 				rv = 0;
10605 			}
10606 			break;
10607 		case NVM_OFF_LED_0_2_CFG:
10608 			rv = wm_nvm_read_word_invm(sc, offset, data);
10609 			if (rv != 0) {
10610 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
10611 				rv = 0;
10612 			}
10613 			break;
10614 		case NVM_OFF_ID_LED_SETTINGS:
10615 			rv = wm_nvm_read_word_invm(sc, offset, data);
10616 			if (rv != 0) {
10617 				*data = ID_LED_RESERVED_FFFF;
10618 				rv = 0;
10619 			}
10620 			break;
10621 		default:
10622 			DPRINTF(WM_DEBUG_NVM,
10623 			    ("NVM word 0x%02x is not mapped.\n", offset));
10624 			*data = NVM_RESERVED_WORD;
10625 			break;
10626 		}
10627 	}
10628 
10629 	return rv;
10630 }
10631 
10632 /* Lock, detecting NVM type, validate checksum, version and read */
10633 
10634 /*
10635  * wm_nvm_acquire:
10636  *
10637  *	Perform the EEPROM handshake required on some chips.
10638  */
10639 static int
10640 wm_nvm_acquire(struct wm_softc *sc)
10641 {
10642 	uint32_t reg;
10643 	int x;
10644 	int ret = 0;
10645 
10646 	/* always success */
10647 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10648 		return 0;
10649 
10650 	if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10651 		ret = wm_get_swfwhw_semaphore(sc);
10652 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10653 		/* This will also do wm_get_swsm_semaphore() if needed */
10654 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10655 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10656 		ret = wm_get_swsm_semaphore(sc);
10657 	}
10658 
10659 	if (ret) {
10660 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10661 			__func__);
10662 		return 1;
10663 	}
10664 
10665 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10666 		reg = CSR_READ(sc, WMREG_EECD);
10667 
10668 		/* Request EEPROM access. */
10669 		reg |= EECD_EE_REQ;
10670 		CSR_WRITE(sc, WMREG_EECD, reg);
10671 
10672 		/* ..and wait for it to be granted. */
10673 		for (x = 0; x < 1000; x++) {
10674 			reg = CSR_READ(sc, WMREG_EECD);
10675 			if (reg & EECD_EE_GNT)
10676 				break;
10677 			delay(5);
10678 		}
10679 		if ((reg & EECD_EE_GNT) == 0) {
10680 			aprint_error_dev(sc->sc_dev,
10681 			    "could not acquire EEPROM GNT\n");
10682 			reg &= ~EECD_EE_REQ;
10683 			CSR_WRITE(sc, WMREG_EECD, reg);
10684 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10685 				wm_put_swfwhw_semaphore(sc);
10686 			if (sc->sc_flags & WM_F_LOCK_SWFW)
10687 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10688 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
10689 				wm_put_swsm_semaphore(sc);
10690 			return 1;
10691 		}
10692 	}
10693 
10694 	return 0;
10695 }
10696 
10697 /*
10698  * wm_nvm_release:
10699  *
10700  *	Release the EEPROM mutex.
10701  */
10702 static void
10703 wm_nvm_release(struct wm_softc *sc)
10704 {
10705 	uint32_t reg;
10706 
10707 	/* always success */
10708 	if ((sc->sc_flags & WM_F_EEPROM_FLASH) != 0)
10709 		return;
10710 
10711 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10712 		reg = CSR_READ(sc, WMREG_EECD);
10713 		reg &= ~EECD_EE_REQ;
10714 		CSR_WRITE(sc, WMREG_EECD, reg);
10715 	}
10716 
10717 	if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10718 		wm_put_swfwhw_semaphore(sc);
10719 	if (sc->sc_flags & WM_F_LOCK_SWFW)
10720 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10721 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
10722 		wm_put_swsm_semaphore(sc);
10723 }
10724 
10725 static int
10726 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10727 {
10728 	uint32_t eecd = 0;
10729 
10730 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10731 	    || sc->sc_type == WM_T_82583) {
10732 		eecd = CSR_READ(sc, WMREG_EECD);
10733 
10734 		/* Isolate bits 15 & 16 */
10735 		eecd = ((eecd >> 15) & 0x03);
10736 
10737 		/* If both bits are set, device is Flash type */
10738 		if (eecd == 0x03)
10739 			return 0;
10740 	}
10741 	return 1;
10742 }
10743 
10744 static int
10745 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10746 {
10747 	uint32_t eec;
10748 
10749 	eec = CSR_READ(sc, WMREG_EEC);
10750 	if ((eec & EEC_FLASH_DETECTED) != 0)
10751 		return 1;
10752 
10753 	return 0;
10754 }
10755 
10756 /*
10757  * wm_nvm_validate_checksum
10758  *
10759  * The checksum is defined as the sum of the first 64 (16 bit) words.
10760  */
10761 static int
10762 wm_nvm_validate_checksum(struct wm_softc *sc)
10763 {
10764 	uint16_t checksum;
10765 	uint16_t eeprom_data;
10766 #ifdef WM_DEBUG
10767 	uint16_t csum_wordaddr, valid_checksum;
10768 #endif
10769 	int i;
10770 
10771 	checksum = 0;
10772 
10773 	/* Don't check for I211 */
10774 	if (sc->sc_type == WM_T_I211)
10775 		return 0;
10776 
10777 #ifdef WM_DEBUG
10778 	if (sc->sc_type == WM_T_PCH_LPT) {
10779 		csum_wordaddr = NVM_OFF_COMPAT;
10780 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10781 	} else {
10782 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10783 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10784 	}
10785 
10786 	/* Dump EEPROM image for debug */
10787 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10788 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10789 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10790 		/* XXX PCH_SPT? */
10791 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10792 		if ((eeprom_data & valid_checksum) == 0) {
10793 			DPRINTF(WM_DEBUG_NVM,
10794 			    ("%s: NVM need to be updated (%04x != %04x)\n",
10795 				device_xname(sc->sc_dev), eeprom_data,
10796 				    valid_checksum));
10797 		}
10798 	}
10799 
10800 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
10801 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10802 		for (i = 0; i < NVM_SIZE; i++) {
10803 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
10804 				printf("XXXX ");
10805 			else
10806 				printf("%04hx ", eeprom_data);
10807 			if (i % 8 == 7)
10808 				printf("\n");
10809 		}
10810 	}
10811 
10812 #endif /* WM_DEBUG */
10813 
10814 	for (i = 0; i < NVM_SIZE; i++) {
10815 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
10816 			return 1;
10817 		checksum += eeprom_data;
10818 	}
10819 
10820 	if (checksum != (uint16_t) NVM_CHECKSUM) {
10821 #ifdef WM_DEBUG
10822 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10823 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10824 #endif
10825 	}
10826 
10827 	return 0;
10828 }
10829 
10830 static void
10831 wm_nvm_version_invm(struct wm_softc *sc)
10832 {
10833 	uint32_t dword;
10834 
10835 	/*
10836 	 * Linux's code to decode version is very strange, so we don't
10837 	 * obey that algorithm and just use word 61 as the document.
10838 	 * Perhaps it's not perfect though...
10839 	 *
10840 	 * Example:
10841 	 *
10842 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
10843 	 */
10844 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
10845 	dword = __SHIFTOUT(dword, INVM_VER_1);
10846 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
10847 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
10848 }
10849 
10850 static void
10851 wm_nvm_version(struct wm_softc *sc)
10852 {
10853 	uint16_t major, minor, build, patch;
10854 	uint16_t uid0, uid1;
10855 	uint16_t nvm_data;
10856 	uint16_t off;
10857 	bool check_version = false;
10858 	bool check_optionrom = false;
10859 	bool have_build = false;
10860 
10861 	/*
10862 	 * Version format:
10863 	 *
10864 	 * XYYZ
10865 	 * X0YZ
10866 	 * X0YY
10867 	 *
10868 	 * Example:
10869 	 *
10870 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
10871 	 *	82571	0x50a6	5.10.6?
10872 	 *	82572	0x506a	5.6.10?
10873 	 *	82572EI	0x5069	5.6.9?
10874 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
10875 	 *		0x2013	2.1.3?
10876 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
10877 	 */
10878 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
10879 	switch (sc->sc_type) {
10880 	case WM_T_82571:
10881 	case WM_T_82572:
10882 	case WM_T_82574:
10883 	case WM_T_82583:
10884 		check_version = true;
10885 		check_optionrom = true;
10886 		have_build = true;
10887 		break;
10888 	case WM_T_82575:
10889 	case WM_T_82576:
10890 	case WM_T_82580:
10891 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
10892 			check_version = true;
10893 		break;
10894 	case WM_T_I211:
10895 		wm_nvm_version_invm(sc);
10896 		goto printver;
10897 	case WM_T_I210:
10898 		if (!wm_nvm_get_flash_presence_i210(sc)) {
10899 			wm_nvm_version_invm(sc);
10900 			goto printver;
10901 		}
10902 		/* FALLTHROUGH */
10903 	case WM_T_I350:
10904 	case WM_T_I354:
10905 		check_version = true;
10906 		check_optionrom = true;
10907 		break;
10908 	default:
10909 		return;
10910 	}
10911 	if (check_version) {
10912 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
10913 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
10914 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
10915 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
10916 			build = nvm_data & NVM_BUILD_MASK;
10917 			have_build = true;
10918 		} else
10919 			minor = nvm_data & 0x00ff;
10920 
10921 		/* Decimal */
10922 		minor = (minor / 16) * 10 + (minor % 16);
10923 		sc->sc_nvm_ver_major = major;
10924 		sc->sc_nvm_ver_minor = minor;
10925 
10926 printver:
10927 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
10928 		    sc->sc_nvm_ver_minor);
10929 		if (have_build) {
10930 			sc->sc_nvm_ver_build = build;
10931 			aprint_verbose(".%d", build);
10932 		}
10933 	}
10934 	if (check_optionrom) {
10935 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
10936 		/* Option ROM Version */
10937 		if ((off != 0x0000) && (off != 0xffff)) {
10938 			off += NVM_COMBO_VER_OFF;
10939 			wm_nvm_read(sc, off + 1, 1, &uid1);
10940 			wm_nvm_read(sc, off, 1, &uid0);
10941 			if ((uid0 != 0) && (uid0 != 0xffff)
10942 			    && (uid1 != 0) && (uid1 != 0xffff)) {
10943 				/* 16bits */
10944 				major = uid0 >> 8;
10945 				build = (uid0 << 8) | (uid1 >> 8);
10946 				patch = uid1 & 0x00ff;
10947 				aprint_verbose(", option ROM Version %d.%d.%d",
10948 				    major, build, patch);
10949 			}
10950 		}
10951 	}
10952 
10953 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
10954 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
10955 }
10956 
10957 /*
10958  * wm_nvm_read:
10959  *
10960  *	Read data from the serial EEPROM.
10961  */
10962 static int
10963 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10964 {
10965 	int rv;
10966 
10967 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
10968 		return 1;
10969 
10970 	if (wm_nvm_acquire(sc))
10971 		return 1;
10972 
10973 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10974 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10975 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
10976 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
10977 	else if (sc->sc_type == WM_T_PCH_SPT)
10978 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
10979 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
10980 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
10981 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
10982 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
10983 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
10984 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
10985 	else
10986 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
10987 
10988 	wm_nvm_release(sc);
10989 	return rv;
10990 }
10991 
10992 /*
10993  * Hardware semaphores.
10994  * Very complexed...
10995  */
10996 
10997 static int
10998 wm_get_swsm_semaphore(struct wm_softc *sc)
10999 {
11000 	int32_t timeout;
11001 	uint32_t swsm;
11002 
11003 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11004 		/* Get the SW semaphore. */
11005 		timeout = sc->sc_nvm_wordsize + 1;
11006 		while (timeout) {
11007 			swsm = CSR_READ(sc, WMREG_SWSM);
11008 
11009 			if ((swsm & SWSM_SMBI) == 0)
11010 				break;
11011 
11012 			delay(50);
11013 			timeout--;
11014 		}
11015 
11016 		if (timeout == 0) {
11017 			aprint_error_dev(sc->sc_dev,
11018 			    "could not acquire SWSM SMBI\n");
11019 			return 1;
11020 		}
11021 	}
11022 
11023 	/* Get the FW semaphore. */
11024 	timeout = sc->sc_nvm_wordsize + 1;
11025 	while (timeout) {
11026 		swsm = CSR_READ(sc, WMREG_SWSM);
11027 		swsm |= SWSM_SWESMBI;
11028 		CSR_WRITE(sc, WMREG_SWSM, swsm);
11029 		/* If we managed to set the bit we got the semaphore. */
11030 		swsm = CSR_READ(sc, WMREG_SWSM);
11031 		if (swsm & SWSM_SWESMBI)
11032 			break;
11033 
11034 		delay(50);
11035 		timeout--;
11036 	}
11037 
11038 	if (timeout == 0) {
11039 		aprint_error_dev(sc->sc_dev,
11040 		    "could not acquire SWSM SWESMBI\n");
11041 		/* Release semaphores */
11042 		wm_put_swsm_semaphore(sc);
11043 		return 1;
11044 	}
11045 	return 0;
11046 }
11047 
11048 static void
11049 wm_put_swsm_semaphore(struct wm_softc *sc)
11050 {
11051 	uint32_t swsm;
11052 
11053 	swsm = CSR_READ(sc, WMREG_SWSM);
11054 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11055 	CSR_WRITE(sc, WMREG_SWSM, swsm);
11056 }
11057 
11058 static int
11059 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11060 {
11061 	uint32_t swfw_sync;
11062 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11063 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11064 	int timeout = 200;
11065 
11066 	for (timeout = 0; timeout < 200; timeout++) {
11067 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
11068 			if (wm_get_swsm_semaphore(sc)) {
11069 				aprint_error_dev(sc->sc_dev,
11070 				    "%s: failed to get semaphore\n",
11071 				    __func__);
11072 				return 1;
11073 			}
11074 		}
11075 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11076 		if ((swfw_sync & (swmask | fwmask)) == 0) {
11077 			swfw_sync |= swmask;
11078 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11079 			if (sc->sc_flags & WM_F_LOCK_SWSM)
11080 				wm_put_swsm_semaphore(sc);
11081 			return 0;
11082 		}
11083 		if (sc->sc_flags & WM_F_LOCK_SWSM)
11084 			wm_put_swsm_semaphore(sc);
11085 		delay(5000);
11086 	}
11087 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11088 	    device_xname(sc->sc_dev), mask, swfw_sync);
11089 	return 1;
11090 }
11091 
11092 static void
11093 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11094 {
11095 	uint32_t swfw_sync;
11096 
11097 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11098 		while (wm_get_swsm_semaphore(sc) != 0)
11099 			continue;
11100 	}
11101 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11102 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11103 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11104 	if (sc->sc_flags & WM_F_LOCK_SWSM)
11105 		wm_put_swsm_semaphore(sc);
11106 }
11107 
11108 static int
11109 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11110 {
11111 	uint32_t ext_ctrl;
11112 	int timeout = 200;
11113 
11114 	for (timeout = 0; timeout < 200; timeout++) {
11115 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11116 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11117 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11118 
11119 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11120 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11121 			return 0;
11122 		delay(5000);
11123 	}
11124 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11125 	    device_xname(sc->sc_dev), ext_ctrl);
11126 	return 1;
11127 }
11128 
11129 static void
11130 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11131 {
11132 	uint32_t ext_ctrl;
11133 
11134 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11135 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11136 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11137 }
11138 
11139 static int
11140 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11141 {
11142 	int i = 0;
11143 	uint32_t reg;
11144 
11145 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11146 	do {
11147 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
11148 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11149 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11150 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11151 			break;
11152 		delay(2*1000);
11153 		i++;
11154 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11155 
11156 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11157 		wm_put_hw_semaphore_82573(sc);
11158 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
11159 		    device_xname(sc->sc_dev));
11160 		return -1;
11161 	}
11162 
11163 	return 0;
11164 }
11165 
11166 static void
11167 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11168 {
11169 	uint32_t reg;
11170 
11171 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11172 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11173 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11174 }
11175 
11176 /*
11177  * Management mode and power management related subroutines.
11178  * BMC, AMT, suspend/resume and EEE.
11179  */
11180 
11181 #ifdef WM_WOL
11182 static int
11183 wm_check_mng_mode(struct wm_softc *sc)
11184 {
11185 	int rv;
11186 
11187 	switch (sc->sc_type) {
11188 	case WM_T_ICH8:
11189 	case WM_T_ICH9:
11190 	case WM_T_ICH10:
11191 	case WM_T_PCH:
11192 	case WM_T_PCH2:
11193 	case WM_T_PCH_LPT:
11194 	case WM_T_PCH_SPT:
11195 		rv = wm_check_mng_mode_ich8lan(sc);
11196 		break;
11197 	case WM_T_82574:
11198 	case WM_T_82583:
11199 		rv = wm_check_mng_mode_82574(sc);
11200 		break;
11201 	case WM_T_82571:
11202 	case WM_T_82572:
11203 	case WM_T_82573:
11204 	case WM_T_80003:
11205 		rv = wm_check_mng_mode_generic(sc);
11206 		break;
11207 	default:
11208 		/* noting to do */
11209 		rv = 0;
11210 		break;
11211 	}
11212 
11213 	return rv;
11214 }
11215 
11216 static int
11217 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11218 {
11219 	uint32_t fwsm;
11220 
11221 	fwsm = CSR_READ(sc, WMREG_FWSM);
11222 
11223 	if (((fwsm & FWSM_FW_VALID) != 0)
11224 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11225 		return 1;
11226 
11227 	return 0;
11228 }
11229 
11230 static int
11231 wm_check_mng_mode_82574(struct wm_softc *sc)
11232 {
11233 	uint16_t data;
11234 
11235 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11236 
11237 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11238 		return 1;
11239 
11240 	return 0;
11241 }
11242 
11243 static int
11244 wm_check_mng_mode_generic(struct wm_softc *sc)
11245 {
11246 	uint32_t fwsm;
11247 
11248 	fwsm = CSR_READ(sc, WMREG_FWSM);
11249 
11250 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11251 		return 1;
11252 
11253 	return 0;
11254 }
11255 #endif /* WM_WOL */
11256 
11257 static int
11258 wm_enable_mng_pass_thru(struct wm_softc *sc)
11259 {
11260 	uint32_t manc, fwsm, factps;
11261 
11262 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11263 		return 0;
11264 
11265 	manc = CSR_READ(sc, WMREG_MANC);
11266 
11267 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11268 		device_xname(sc->sc_dev), manc));
11269 	if ((manc & MANC_RECV_TCO_EN) == 0)
11270 		return 0;
11271 
11272 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11273 		fwsm = CSR_READ(sc, WMREG_FWSM);
11274 		factps = CSR_READ(sc, WMREG_FACTPS);
11275 		if (((factps & FACTPS_MNGCG) == 0)
11276 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11277 			return 1;
11278 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11279 		uint16_t data;
11280 
11281 		factps = CSR_READ(sc, WMREG_FACTPS);
11282 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11283 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11284 			device_xname(sc->sc_dev), factps, data));
11285 		if (((factps & FACTPS_MNGCG) == 0)
11286 		    && ((data & NVM_CFG2_MNGM_MASK)
11287 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11288 			return 1;
11289 	} else if (((manc & MANC_SMBUS_EN) != 0)
11290 	    && ((manc & MANC_ASF_EN) == 0))
11291 		return 1;
11292 
11293 	return 0;
11294 }
11295 
11296 static bool
11297 wm_phy_resetisblocked(struct wm_softc *sc)
11298 {
11299 	bool blocked = false;
11300 	uint32_t reg;
11301 	int i = 0;
11302 
11303 	switch (sc->sc_type) {
11304 	case WM_T_ICH8:
11305 	case WM_T_ICH9:
11306 	case WM_T_ICH10:
11307 	case WM_T_PCH:
11308 	case WM_T_PCH2:
11309 	case WM_T_PCH_LPT:
11310 	case WM_T_PCH_SPT:
11311 		do {
11312 			reg = CSR_READ(sc, WMREG_FWSM);
11313 			if ((reg & FWSM_RSPCIPHY) == 0) {
11314 				blocked = true;
11315 				delay(10*1000);
11316 				continue;
11317 			}
11318 			blocked = false;
11319 		} while (blocked && (i++ < 10));
11320 		return blocked;
11321 		break;
11322 	case WM_T_82571:
11323 	case WM_T_82572:
11324 	case WM_T_82573:
11325 	case WM_T_82574:
11326 	case WM_T_82583:
11327 	case WM_T_80003:
11328 		reg = CSR_READ(sc, WMREG_MANC);
11329 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11330 			return true;
11331 		else
11332 			return false;
11333 		break;
11334 	default:
11335 		/* no problem */
11336 		break;
11337 	}
11338 
11339 	return false;
11340 }
11341 
11342 static void
11343 wm_get_hw_control(struct wm_softc *sc)
11344 {
11345 	uint32_t reg;
11346 
11347 	switch (sc->sc_type) {
11348 	case WM_T_82573:
11349 		reg = CSR_READ(sc, WMREG_SWSM);
11350 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11351 		break;
11352 	case WM_T_82571:
11353 	case WM_T_82572:
11354 	case WM_T_82574:
11355 	case WM_T_82583:
11356 	case WM_T_80003:
11357 	case WM_T_ICH8:
11358 	case WM_T_ICH9:
11359 	case WM_T_ICH10:
11360 	case WM_T_PCH:
11361 	case WM_T_PCH2:
11362 	case WM_T_PCH_LPT:
11363 	case WM_T_PCH_SPT:
11364 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11365 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11366 		break;
11367 	default:
11368 		break;
11369 	}
11370 }
11371 
11372 static void
11373 wm_release_hw_control(struct wm_softc *sc)
11374 {
11375 	uint32_t reg;
11376 
11377 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11378 		return;
11379 
11380 	if (sc->sc_type == WM_T_82573) {
11381 		reg = CSR_READ(sc, WMREG_SWSM);
11382 		reg &= ~SWSM_DRV_LOAD;
11383 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
11384 	} else {
11385 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11386 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11387 	}
11388 }
11389 
11390 static void
11391 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11392 {
11393 	uint32_t reg;
11394 
11395 	if (sc->sc_type < WM_T_PCH2)
11396 		return;
11397 
11398 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11399 
11400 	if (gate)
11401 		reg |= EXTCNFCTR_GATE_PHY_CFG;
11402 	else
11403 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11404 
11405 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11406 }
11407 
11408 static void
11409 wm_smbustopci(struct wm_softc *sc)
11410 {
11411 	uint32_t fwsm, reg;
11412 
11413 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
11414 	wm_gate_hw_phy_config_ich8lan(sc, true);
11415 
11416 	/* Acquire semaphore */
11417 	wm_get_swfwhw_semaphore(sc);
11418 
11419 	fwsm = CSR_READ(sc, WMREG_FWSM);
11420 	if (((fwsm & FWSM_FW_VALID) == 0)
11421 	    && ((wm_phy_resetisblocked(sc) == false))) {
11422 		if (sc->sc_type >= WM_T_PCH_LPT) {
11423 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
11424 			reg |= CTRL_EXT_FORCE_SMBUS;
11425 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11426 			CSR_WRITE_FLUSH(sc);
11427 			delay(50*1000);
11428 		}
11429 
11430 		/* Toggle LANPHYPC */
11431 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11432 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11433 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11434 		CSR_WRITE_FLUSH(sc);
11435 		delay(10);
11436 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11437 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11438 		CSR_WRITE_FLUSH(sc);
11439 		delay(50*1000);
11440 
11441 		if (sc->sc_type >= WM_T_PCH_LPT) {
11442 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
11443 			reg &= ~CTRL_EXT_FORCE_SMBUS;
11444 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11445 		}
11446 	}
11447 
11448 	/* Release semaphore */
11449 	wm_put_swfwhw_semaphore(sc);
11450 
11451 	/*
11452 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
11453 	 */
11454 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11455 		wm_gate_hw_phy_config_ich8lan(sc, false);
11456 }
11457 
11458 static void
11459 wm_init_manageability(struct wm_softc *sc)
11460 {
11461 
11462 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11463 		device_xname(sc->sc_dev), __func__));
11464 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11465 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11466 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11467 
11468 		/* Disable hardware interception of ARP */
11469 		manc &= ~MANC_ARP_EN;
11470 
11471 		/* Enable receiving management packets to the host */
11472 		if (sc->sc_type >= WM_T_82571) {
11473 			manc |= MANC_EN_MNG2HOST;
11474 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
11475 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11476 		}
11477 
11478 		CSR_WRITE(sc, WMREG_MANC, manc);
11479 	}
11480 }
11481 
11482 static void
11483 wm_release_manageability(struct wm_softc *sc)
11484 {
11485 
11486 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11487 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11488 
11489 		manc |= MANC_ARP_EN;
11490 		if (sc->sc_type >= WM_T_82571)
11491 			manc &= ~MANC_EN_MNG2HOST;
11492 
11493 		CSR_WRITE(sc, WMREG_MANC, manc);
11494 	}
11495 }
11496 
11497 static void
11498 wm_get_wakeup(struct wm_softc *sc)
11499 {
11500 
11501 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11502 	switch (sc->sc_type) {
11503 	case WM_T_82573:
11504 	case WM_T_82583:
11505 		sc->sc_flags |= WM_F_HAS_AMT;
11506 		/* FALLTHROUGH */
11507 	case WM_T_80003:
11508 	case WM_T_82541:
11509 	case WM_T_82547:
11510 	case WM_T_82571:
11511 	case WM_T_82572:
11512 	case WM_T_82574:
11513 	case WM_T_82575:
11514 	case WM_T_82576:
11515 	case WM_T_82580:
11516 	case WM_T_I350:
11517 	case WM_T_I354:
11518 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11519 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11520 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11521 		break;
11522 	case WM_T_ICH8:
11523 	case WM_T_ICH9:
11524 	case WM_T_ICH10:
11525 	case WM_T_PCH:
11526 	case WM_T_PCH2:
11527 	case WM_T_PCH_LPT:
11528 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
11529 		sc->sc_flags |= WM_F_HAS_AMT;
11530 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11531 		break;
11532 	default:
11533 		break;
11534 	}
11535 
11536 	/* 1: HAS_MANAGE */
11537 	if (wm_enable_mng_pass_thru(sc) != 0)
11538 		sc->sc_flags |= WM_F_HAS_MANAGE;
11539 
11540 #ifdef WM_DEBUG
11541 	printf("\n");
11542 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11543 		printf("HAS_AMT,");
11544 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11545 		printf("ARC_SUBSYS_VALID,");
11546 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11547 		printf("ASF_FIRMWARE_PRES,");
11548 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11549 		printf("HAS_MANAGE,");
11550 	printf("\n");
11551 #endif
11552 	/*
11553 	 * Note that the WOL flags is set after the resetting of the eeprom
11554 	 * stuff
11555 	 */
11556 }
11557 
11558 #ifdef WM_WOL
11559 /* WOL in the newer chipset interfaces (pchlan) */
11560 static void
11561 wm_enable_phy_wakeup(struct wm_softc *sc)
11562 {
11563 #if 0
11564 	uint16_t preg;
11565 
11566 	/* Copy MAC RARs to PHY RARs */
11567 
11568 	/* Copy MAC MTA to PHY MTA */
11569 
11570 	/* Configure PHY Rx Control register */
11571 
11572 	/* Enable PHY wakeup in MAC register */
11573 
11574 	/* Configure and enable PHY wakeup in PHY registers */
11575 
11576 	/* Activate PHY wakeup */
11577 
11578 	/* XXX */
11579 #endif
11580 }
11581 
11582 /* Power down workaround on D3 */
11583 static void
11584 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11585 {
11586 	uint32_t reg;
11587 	int i;
11588 
11589 	for (i = 0; i < 2; i++) {
11590 		/* Disable link */
11591 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11592 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11593 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11594 
11595 		/*
11596 		 * Call gig speed drop workaround on Gig disable before
11597 		 * accessing any PHY registers
11598 		 */
11599 		if (sc->sc_type == WM_T_ICH8)
11600 			wm_gig_downshift_workaround_ich8lan(sc);
11601 
11602 		/* Write VR power-down enable */
11603 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11604 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11605 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11606 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11607 
11608 		/* Read it back and test */
11609 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11610 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11611 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11612 			break;
11613 
11614 		/* Issue PHY reset and repeat at most one more time */
11615 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11616 	}
11617 }
11618 
11619 static void
11620 wm_enable_wakeup(struct wm_softc *sc)
11621 {
11622 	uint32_t reg, pmreg;
11623 	pcireg_t pmode;
11624 
11625 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11626 		&pmreg, NULL) == 0)
11627 		return;
11628 
11629 	/* Advertise the wakeup capability */
11630 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11631 	    | CTRL_SWDPIN(3));
11632 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11633 
11634 	/* ICH workaround */
11635 	switch (sc->sc_type) {
11636 	case WM_T_ICH8:
11637 	case WM_T_ICH9:
11638 	case WM_T_ICH10:
11639 	case WM_T_PCH:
11640 	case WM_T_PCH2:
11641 	case WM_T_PCH_LPT:
11642 	case WM_T_PCH_SPT:
11643 		/* Disable gig during WOL */
11644 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11645 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11646 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11647 		if (sc->sc_type == WM_T_PCH)
11648 			wm_gmii_reset(sc);
11649 
11650 		/* Power down workaround */
11651 		if (sc->sc_phytype == WMPHY_82577) {
11652 			struct mii_softc *child;
11653 
11654 			/* Assume that the PHY is copper */
11655 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11656 			if (child->mii_mpd_rev <= 2)
11657 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
11658 				    (768 << 5) | 25, 0x0444); /* magic num */
11659 		}
11660 		break;
11661 	default:
11662 		break;
11663 	}
11664 
11665 	/* Keep the laser running on fiber adapters */
11666 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
11667 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
11668 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11669 		reg |= CTRL_EXT_SWDPIN(3);
11670 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11671 	}
11672 
11673 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
11674 #if 0	/* for the multicast packet */
11675 	reg |= WUFC_MC;
11676 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
11677 #endif
11678 
11679 	if (sc->sc_type == WM_T_PCH) {
11680 		wm_enable_phy_wakeup(sc);
11681 	} else {
11682 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
11683 		CSR_WRITE(sc, WMREG_WUFC, reg);
11684 	}
11685 
11686 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11687 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11688 		|| (sc->sc_type == WM_T_PCH2))
11689 		    && (sc->sc_phytype == WMPHY_IGP_3))
11690 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
11691 
11692 	/* Request PME */
11693 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
11694 #if 0
11695 	/* Disable WOL */
11696 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
11697 #else
11698 	/* For WOL */
11699 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
11700 #endif
11701 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
11702 }
11703 #endif /* WM_WOL */
11704 
11705 /* LPLU */
11706 
11707 static void
11708 wm_lplu_d0_disable(struct wm_softc *sc)
11709 {
11710 	uint32_t reg;
11711 
11712 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11713 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
11714 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11715 }
11716 
11717 static void
11718 wm_lplu_d0_disable_pch(struct wm_softc *sc)
11719 {
11720 	uint32_t reg;
11721 
11722 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
11723 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
11724 	reg |= HV_OEM_BITS_ANEGNOW;
11725 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
11726 }
11727 
11728 /* EEE */
11729 
11730 static void
11731 wm_set_eee_i350(struct wm_softc *sc)
11732 {
11733 	uint32_t ipcnfg, eeer;
11734 
11735 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
11736 	eeer = CSR_READ(sc, WMREG_EEER);
11737 
11738 	if ((sc->sc_flags & WM_F_EEE) != 0) {
11739 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11740 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
11741 		    | EEER_LPI_FC);
11742 	} else {
11743 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
11744 		ipcnfg &= ~IPCNFG_10BASE_TE;
11745 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
11746 		    | EEER_LPI_FC);
11747 	}
11748 
11749 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
11750 	CSR_WRITE(sc, WMREG_EEER, eeer);
11751 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
11752 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
11753 }
11754 
11755 /*
11756  * Workarounds (mainly PHY related).
11757  * Basically, PHY's workarounds are in the PHY drivers.
11758  */
11759 
11760 /* Work-around for 82566 Kumeran PCS lock loss */
11761 static void
11762 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
11763 {
11764 #if 0
11765 	int miistatus, active, i;
11766 	int reg;
11767 
11768 	miistatus = sc->sc_mii.mii_media_status;
11769 
11770 	/* If the link is not up, do nothing */
11771 	if ((miistatus & IFM_ACTIVE) == 0)
11772 		return;
11773 
11774 	active = sc->sc_mii.mii_media_active;
11775 
11776 	/* Nothing to do if the link is other than 1Gbps */
11777 	if (IFM_SUBTYPE(active) != IFM_1000_T)
11778 		return;
11779 
11780 	for (i = 0; i < 10; i++) {
11781 		/* read twice */
11782 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11783 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
11784 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
11785 			goto out;	/* GOOD! */
11786 
11787 		/* Reset the PHY */
11788 		wm_gmii_reset(sc);
11789 		delay(5*1000);
11790 	}
11791 
11792 	/* Disable GigE link negotiation */
11793 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
11794 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11795 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11796 
11797 	/*
11798 	 * Call gig speed drop workaround on Gig disable before accessing
11799 	 * any PHY registers.
11800 	 */
11801 	wm_gig_downshift_workaround_ich8lan(sc);
11802 
11803 out:
11804 	return;
11805 #endif
11806 }
11807 
11808 /* WOL from S5 stops working */
11809 static void
11810 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
11811 {
11812 	uint16_t kmrn_reg;
11813 
11814 	/* Only for igp3 */
11815 	if (sc->sc_phytype == WMPHY_IGP_3) {
11816 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
11817 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
11818 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11819 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
11820 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
11821 	}
11822 }
11823 
11824 /*
11825  * Workaround for pch's PHYs
11826  * XXX should be moved to new PHY driver?
11827  */
11828 static void
11829 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
11830 {
11831 	if (sc->sc_phytype == WMPHY_82577)
11832 		wm_set_mdio_slow_mode_hv(sc);
11833 
11834 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
11835 
11836 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
11837 
11838 	/* 82578 */
11839 	if (sc->sc_phytype == WMPHY_82578) {
11840 		/* PCH rev. < 3 */
11841 		if (sc->sc_rev < 3) {
11842 			/* XXX 6 bit shift? Why? Is it page2? */
11843 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
11844 			    0x66c0);
11845 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
11846 			    0xffff);
11847 		}
11848 
11849 		/* XXX phy rev. < 2 */
11850 	}
11851 
11852 	/* Select page 0 */
11853 
11854 	/* XXX acquire semaphore */
11855 	wm_gmii_i82544_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
11856 	/* XXX release semaphore */
11857 
11858 	/*
11859 	 * Configure the K1 Si workaround during phy reset assuming there is
11860 	 * link so that it disables K1 if link is in 1Gbps.
11861 	 */
11862 	wm_k1_gig_workaround_hv(sc, 1);
11863 }
11864 
11865 static void
11866 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
11867 {
11868 
11869 	wm_set_mdio_slow_mode_hv(sc);
11870 }
11871 
11872 static void
11873 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
11874 {
11875 	int k1_enable = sc->sc_nvm_k1_enabled;
11876 
11877 	/* XXX acquire semaphore */
11878 
11879 	if (link) {
11880 		k1_enable = 0;
11881 
11882 		/* Link stall fix for link up */
11883 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
11884 	} else {
11885 		/* Link stall fix for link down */
11886 		wm_gmii_hv_writereg(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
11887 	}
11888 
11889 	wm_configure_k1_ich8lan(sc, k1_enable);
11890 
11891 	/* XXX release semaphore */
11892 }
11893 
11894 static void
11895 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
11896 {
11897 	uint32_t reg;
11898 
11899 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
11900 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
11901 	    reg | HV_KMRN_MDIO_SLOW);
11902 }
11903 
11904 static void
11905 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
11906 {
11907 	uint32_t ctrl, ctrl_ext, tmp;
11908 	uint16_t kmrn_reg;
11909 
11910 	kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
11911 
11912 	if (k1_enable)
11913 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
11914 	else
11915 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
11916 
11917 	wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
11918 
11919 	delay(20);
11920 
11921 	ctrl = CSR_READ(sc, WMREG_CTRL);
11922 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11923 
11924 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
11925 	tmp |= CTRL_FRCSPD;
11926 
11927 	CSR_WRITE(sc, WMREG_CTRL, tmp);
11928 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
11929 	CSR_WRITE_FLUSH(sc);
11930 	delay(20);
11931 
11932 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
11933 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11934 	CSR_WRITE_FLUSH(sc);
11935 	delay(20);
11936 }
11937 
11938 /* special case - for 82575 - need to do manual init ... */
11939 static void
11940 wm_reset_init_script_82575(struct wm_softc *sc)
11941 {
11942 	/*
11943 	 * remark: this is untested code - we have no board without EEPROM
11944 	 *  same setup as mentioned int the FreeBSD driver for the i82575
11945 	 */
11946 
11947 	/* SerDes configuration via SERDESCTRL */
11948 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
11949 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
11950 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
11951 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
11952 
11953 	/* CCM configuration via CCMCTL register */
11954 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
11955 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
11956 
11957 	/* PCIe lanes configuration */
11958 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
11959 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
11960 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
11961 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
11962 
11963 	/* PCIe PLL Configuration */
11964 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
11965 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
11966 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
11967 }
11968 
11969 static void
11970 wm_reset_mdicnfg_82580(struct wm_softc *sc)
11971 {
11972 	uint32_t reg;
11973 	uint16_t nvmword;
11974 	int rv;
11975 
11976 	if ((sc->sc_flags & WM_F_SGMII) == 0)
11977 		return;
11978 
11979 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
11980 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
11981 	if (rv != 0) {
11982 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
11983 		    __func__);
11984 		return;
11985 	}
11986 
11987 	reg = CSR_READ(sc, WMREG_MDICNFG);
11988 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
11989 		reg |= MDICNFG_DEST;
11990 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
11991 		reg |= MDICNFG_COM_MDIO;
11992 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
11993 }
11994 
11995 /*
11996  * I210 Errata 25 and I211 Errata 10
11997  * Slow System Clock.
11998  */
11999 static void
12000 wm_pll_workaround_i210(struct wm_softc *sc)
12001 {
12002 	uint32_t mdicnfg, wuc;
12003 	uint32_t reg;
12004 	pcireg_t pcireg;
12005 	uint32_t pmreg;
12006 	uint16_t nvmword, tmp_nvmword;
12007 	int phyval;
12008 	bool wa_done = false;
12009 	int i;
12010 
12011 	/* Save WUC and MDICNFG registers */
12012 	wuc = CSR_READ(sc, WMREG_WUC);
12013 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
12014 
12015 	reg = mdicnfg & ~MDICNFG_DEST;
12016 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12017 
12018 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
12019 		nvmword = INVM_DEFAULT_AL;
12020 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
12021 
12022 	/* Get Power Management cap offset */
12023 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12024 		&pmreg, NULL) == 0)
12025 		return;
12026 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
12027 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
12028 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
12029 
12030 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
12031 			break; /* OK */
12032 		}
12033 
12034 		wa_done = true;
12035 		/* Directly reset the internal PHY */
12036 		reg = CSR_READ(sc, WMREG_CTRL);
12037 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
12038 
12039 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12040 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
12041 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12042 
12043 		CSR_WRITE(sc, WMREG_WUC, 0);
12044 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
12045 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12046 
12047 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12048 		    pmreg + PCI_PMCSR);
12049 		pcireg |= PCI_PMCSR_STATE_D3;
12050 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12051 		    pmreg + PCI_PMCSR, pcireg);
12052 		delay(1000);
12053 		pcireg &= ~PCI_PMCSR_STATE_D3;
12054 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12055 		    pmreg + PCI_PMCSR, pcireg);
12056 
12057 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
12058 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12059 
12060 		/* Restore WUC register */
12061 		CSR_WRITE(sc, WMREG_WUC, wuc);
12062 	}
12063 
12064 	/* Restore MDICNFG setting */
12065 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
12066 	if (wa_done)
12067 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12068 }
12069