/*	$NetBSD: if_wm.c,v 1.428 2016/10/26 10:21:44 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Advanced Receive Descriptor
 *	- EEE (Energy-Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.428 2016/10/26 10:21:44 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	do { if (wm_debug & (x)) printf y; } while (0)
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#endif

/*
 * The maximum number of interrupts that this device driver uses.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544.  We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))

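/*
 * A worked example of the ring arithmetic above (illustrative values,
 * not code from the driver): with WM_NTXDESC(txq) == 4096,
 * WM_NTXDESC_MASK(txq) == 4095, so WM_NEXTTX(txq, 4095) ==
 * (4095 + 1) & 4095 == 0; the index wraps back to the start of the
 * ring without a division.  This is why txq_num and txq_ndesc must
 * be powers of two.
 */
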
#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

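/*
 * The same power-of-two masking applies to the Rx ring; for example,
 * WM_PREVRX(0) == (0 - 1) & 255 == 255, stepping backwards across the
 * wrap point.
 */
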
typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};
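
/*
 * Note: swfwphysem[] is indexed by the chip's function number
 * (sc_funcid, 0 to 3), so e.g. function 1 uses the SWFW_PHY1_SM
 * semaphore for its PHY.
 */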

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

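/*
 * For example, WM_Q_EVCNT_ATTACH(txq, txdw, q, 0, xname, EVCNT_TYPE_INTR)
 * formats the counter name "txq00txdw" into (q)->txq_txdw_evcnt_name and
 * attaches (q)->txq_ev_txdw under that name.
 */
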
#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
#endif /* WM_EVENT_COUNTERS */

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* a tx descriptor size */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segment */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags (not ifp->if_flags)
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	wiseman_rxdesc_t *rxq_descs;

	/* RX control data structures. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segment */
	size_t rxq_desc_size;		/* control data size */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of transmit and receive queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
};

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * legacy and msi use sc_ihs[0].
					 */
	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#ifdef WM_MPSAFE
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

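/*
 * The two chain macros above keep rxq_tailp pointing at the m_next slot
 * of the last mbuf in the Rx chain, so WM_RXCHAIN_LINK() appends in
 * constant time.  Right after WM_RXCHAIN_RESET(), rxq_tailp points at
 * rxq_head itself, so the first link fills in the head of the chain.
 */
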
#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

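/*
 * As a concrete example of the counter macros above,
 * WM_Q_EVCNT_INCR(txq, txdw) expands (with WM_EVENT_COUNTERS defined) to
 * (&(txq)->txq_ev_txdw)->ev_count++, matching the fields declared by
 * WM_Q_EVCNT_DEFINE().
 */
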
#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)

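/*
 * The _LO/_HI pairs above split a descriptor ring base address for the
 * chip's 64-bit base-address registers.  For an illustrative value: a
 * ring at bus address 0x123456000 yields LO 0x23456000 and HI 0x1 when
 * bus_addr_t is 64-bit; with a 32-bit bus_addr_t, HI is simply 0.
 */
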
/*
 * Register read/write functions.
 * Other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
	uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_rss_getkey(uint8_t *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
    uint32_t *, uint8_t *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static inline int	wm_nq_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
/* Interrupt */
static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
static void	wm_rxeof(struct wm_rxqueue *);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
static int	wm_kmrn_readreg(struct wm_softc *, int);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
static void	wm_kmrn_writereg(struct wm_softc *, int, int);
static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (with or without EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
	uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_acquire(struct wm_softc *);
static void	wm_nvm_release(struct wm_softc *);
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#ifdef WM_WOL
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
#ifdef WM_WOL
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
#endif
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
static void	wm_lplu_d0_disable_pch(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are implemented in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static void	wm_pll_workaround_i210(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
	  "I218 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
	  "I218 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
#if 0
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
	  "I219 V Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
	  "I219 LM Ethernet Connection",
	  WM_T_PCH_SPT,		WMP_F_COPPER },
#endif
	{ 0,			0,
	  NULL,
	  0,			0 },
};

1415 /*
1416  * Register read/write functions,
1417  * other than CSR_{READ|WRITE}().
1418  */
1419 
1420 #if 0 /* Not currently used */
1421 static inline uint32_t
1422 wm_io_read(struct wm_softc *sc, int reg)
1423 {
1424 
1425 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1426 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1427 }
1428 #endif
1429 
1430 static inline void
1431 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1432 {
1433 
1434 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1435 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1436 }
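
/*
 * The two bus_space accesses above implement the chip's indirect I/O
 * window: the register offset goes to IOADDR (offset 0 in I/O space)
 * and the data is then transferred through IODATA (offset 4).  A
 * minimal, illustrative use (register and value chosen only as an
 * example):
 *
 *	if (sc->sc_flags & WM_F_IOH_VALID)
 *		wm_io_write(sc, WMREG_CTRL, CTRL_RST);
 */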
1437 
1438 static inline void
1439 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1440     uint32_t data)
1441 {
1442 	uint32_t regval;
1443 	int i;
1444 
1445 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1446 
1447 	CSR_WRITE(sc, reg, regval);
1448 
1449 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1450 		delay(5);
1451 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1452 			break;
1453 	}
1454 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1455 		aprint_error("%s: WARNING:"
1456 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1457 		    device_xname(sc->sc_dev), reg);
1458 	}
1459 }
1460 
1461 static inline void
1462 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1463 {
1464 	wa->wa_low = htole32(v & 0xffffffffU);
1465 	if (sizeof(bus_addr_t) == 8)
1466 		wa->wa_high = htole32((uint64_t) v >> 32);
1467 	else
1468 		wa->wa_high = 0;
1469 }
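
/*
 * Example: with a 64-bit bus_addr_t, v = 0x123456789 stores
 * wa_low = htole32(0x23456789) and wa_high = htole32(0x1);
 * with a 32-bit bus_addr_t the high word is always 0.
 */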
1470 
1471 /*
1472  * Descriptor sync/init functions.
1473  */
1474 static inline void
1475 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1476 {
1477 	struct wm_softc *sc = txq->txq_sc;
1478 
1479 	/* If it will wrap around, sync to the end of the ring. */
1480 	if ((start + num) > WM_NTXDESC(txq)) {
1481 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1482 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1483 		    (WM_NTXDESC(txq) - start), ops);
1484 		num -= (WM_NTXDESC(txq) - start);
1485 		start = 0;
1486 	}
1487 
1488 	/* Now sync whatever is left. */
1489 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1490 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1491 }
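
/*
 * Wrap-around example: with a 256-descriptor ring, a call with
 * start = 250 and num = 10 first syncs descriptors 250-255, then
 * descriptors 0-3 in the trailing bus_dmamap_sync() call.
 */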
1492 
1493 static inline void
1494 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1495 {
1496 	struct wm_softc *sc = rxq->rxq_sc;
1497 
1498 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1499 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1500 }
1501 
1502 static inline void
1503 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1504 {
1505 	struct wm_softc *sc = rxq->rxq_sc;
1506 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1507 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1508 	struct mbuf *m = rxs->rxs_mbuf;
1509 
1510 	/*
1511 	 * Note: We scoot the packet forward 2 bytes in the buffer
1512 	 * so that the payload after the Ethernet header is aligned
1513 	 * to a 4-byte boundary.
1514 	 *
1515 	 * XXX BRAINDAMAGE ALERT!
1516 	 * The stupid chip uses the same size for every buffer, which
1517 	 * is set in the Receive Control register.  We are using the 2K
1518 	 * size option, but what we REALLY want is (2K - 2)!  For this
1519 	 * reason, we can't "scoot" packets longer than the standard
1520 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1521 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1522 	 * the upper layer copy the headers.
1523 	 */
1524 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1525 
1526 	wm_set_dma_addr(&rxd->wrx_addr,
1527 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1528 	rxd->wrx_len = 0;
1529 	rxd->wrx_cksum = 0;
1530 	rxd->wrx_status = 0;
1531 	rxd->wrx_errors = 0;
1532 	rxd->wrx_special = 0;
1533 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1534 
1535 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1536 }
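
#if 0
	/*
	 * Illustrative sketch (not the actual init-path code) of how
	 * sc_align_tweak could be chosen, per the comment above:
	 */
	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
		sc->sc_align_tweak = 0;	/* frame won't fit in (2K - 2) */
	else
		sc->sc_align_tweak = 2;	/* scoot payload forward 2 bytes */
#endif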
1537 
1538 /*
1539  * Device driver interface functions and commonly used functions.
1540  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1541  */
1542 
1543 /* Lookup supported device table */
1544 static const struct wm_product *
1545 wm_lookup(const struct pci_attach_args *pa)
1546 {
1547 	const struct wm_product *wmp;
1548 
1549 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1550 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1551 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1552 			return wmp;
1553 	}
1554 	return NULL;
1555 }
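
/*
 * Example: wm_lookup() backs both wm_match() and wm_attach() below.
 * A device whose PCI ID matches PCI_PRODUCT_INTEL_I350_FIBER resolves
 * to the WM_T_I350/WMP_F_FIBER entry of wm_products[]; unknown IDs
 * fall off the NULL-name sentinel and return NULL.
 */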
1556 
1557 /* The match function (ca_match) */
1558 static int
1559 wm_match(device_t parent, cfdata_t cf, void *aux)
1560 {
1561 	struct pci_attach_args *pa = aux;
1562 
1563 	if (wm_lookup(pa) != NULL)
1564 		return 1;
1565 
1566 	return 0;
1567 }
1568 
1569 /* The attach function (ca_attach) */
1570 static void
1571 wm_attach(device_t parent, device_t self, void *aux)
1572 {
1573 	struct wm_softc *sc = device_private(self);
1574 	struct pci_attach_args *pa = aux;
1575 	prop_dictionary_t dict;
1576 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1577 	pci_chipset_tag_t pc = pa->pa_pc;
1578 	int counts[PCI_INTR_TYPE_SIZE];
1579 	pci_intr_type_t max_type;
1580 	const char *eetype, *xname;
1581 	bus_space_tag_t memt;
1582 	bus_space_handle_t memh;
1583 	bus_size_t memsize;
1584 	int memh_valid;
1585 	int i, error;
1586 	const struct wm_product *wmp;
1587 	prop_data_t ea;
1588 	prop_number_t pn;
1589 	uint8_t enaddr[ETHER_ADDR_LEN];
1590 	uint16_t cfg1, cfg2, swdpin, nvmword;
1591 	pcireg_t preg, memtype;
1592 	uint16_t eeprom_data, apme_mask;
1593 	bool force_clear_smbi;
1594 	uint32_t link_mode;
1595 	uint32_t reg;
1596 
1597 	sc->sc_dev = self;
1598 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1599 	sc->sc_stopping = false;
1600 
1601 	wmp = wm_lookup(pa);
1602 #ifdef DIAGNOSTIC
1603 	if (wmp == NULL) {
1604 		printf("\n");
1605 		panic("wm_attach: impossible");
1606 	}
1607 #endif
1608 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1609 
1610 	sc->sc_pc = pa->pa_pc;
1611 	sc->sc_pcitag = pa->pa_tag;
1612 
1613 	if (pci_dma64_available(pa))
1614 		sc->sc_dmat = pa->pa_dmat64;
1615 	else
1616 		sc->sc_dmat = pa->pa_dmat;
1617 
1618 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1619 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1620 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1621 
1622 	sc->sc_type = wmp->wmp_type;
1623 
1624 	/* Set default function pointers */
1625 	sc->phy.acquire = wm_get_null;
1626 	sc->phy.release = wm_put_null;
1627 
1628 	if (sc->sc_type < WM_T_82543) {
1629 		if (sc->sc_rev < 2) {
1630 			aprint_error_dev(sc->sc_dev,
1631 			    "i82542 must be at least rev. 2\n");
1632 			return;
1633 		}
1634 		if (sc->sc_rev < 3)
1635 			sc->sc_type = WM_T_82542_2_0;
1636 	}
1637 
1638 	/*
1639 	 * Disable MSI for Errata:
1640 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1641 	 *
1642 	 *  82544: Errata 25
1643 	 *  82540: Errata  6 (easy to reproduce device timeout)
1644 	 *  82545: Errata  4 (easy to reproduce device timeout)
1645 	 *  82546: Errata 26 (easy to reproduce device timeout)
1646 	 *  82541: Errata  7 (easy to reproduce device timeout)
1647 	 *
1648 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1649 	 *
1650 	 *  82571 & 82572: Errata 63
1651 	 */
1652 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1653 	    || (sc->sc_type == WM_T_82572))
1654 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1655 
1656 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1657 	    || (sc->sc_type == WM_T_82580)
1658 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1659 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1660 		sc->sc_flags |= WM_F_NEWQUEUE;
1661 
1662 	/* Set device properties (mactype) */
1663 	dict = device_properties(sc->sc_dev);
1664 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1665 
1666 	/*
1667 	 * Map the device.  All devices support memory-mapped access,
1668 	 * and it is really required for normal operation.
1669 	 */
1670 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1671 	switch (memtype) {
1672 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1673 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1674 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1675 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1676 		break;
1677 	default:
1678 		memh_valid = 0;
1679 		break;
1680 	}
1681 
1682 	if (memh_valid) {
1683 		sc->sc_st = memt;
1684 		sc->sc_sh = memh;
1685 		sc->sc_ss = memsize;
1686 	} else {
1687 		aprint_error_dev(sc->sc_dev,
1688 		    "unable to map device registers\n");
1689 		return;
1690 	}
1691 
1692 	/*
1693 	 * In addition, i82544 and later support I/O mapped indirect
1694 	 * register access.  It is not desirable (nor supported in
1695 	 * this driver) to use it for normal operation, though it is
1696 	 * required to work around bugs in some chip versions.
1697 	 */
1698 	if (sc->sc_type >= WM_T_82544) {
1699 		/* First we have to find the I/O BAR. */
1700 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1701 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1702 			if (memtype == PCI_MAPREG_TYPE_IO)
1703 				break;
1704 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1705 			    PCI_MAPREG_MEM_TYPE_64BIT)
1706 				i += 4;	/* skip high bits, too */
1707 		}
1708 		if (i < PCI_MAPREG_END) {
1709 			/*
1710 			 * We found PCI_MAPREG_TYPE_IO.  Note that the 82580
1711 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1712 			 * That is fine, because newer chips don't have
1713 			 * this bug.
1714 			 *
1715 			 * The i8254x apparently doesn't respond when the
1716 			 * I/O BAR is 0, which suggests it hasn't been
1717 			 * configured.
1718 			 */
1719 			preg = pci_conf_read(pc, pa->pa_tag, i);
1720 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1721 				aprint_error_dev(sc->sc_dev,
1722 				    "WARNING: I/O BAR at zero.\n");
1723 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1724 					0, &sc->sc_iot, &sc->sc_ioh,
1725 					NULL, &sc->sc_ios) == 0) {
1726 				sc->sc_flags |= WM_F_IOH_VALID;
1727 			} else {
1728 				aprint_error_dev(sc->sc_dev,
1729 				    "WARNING: unable to map I/O space\n");
1730 			}
1731 		}
1732 
1733 	}
1734 
1735 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1736 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1737 	preg |= PCI_COMMAND_MASTER_ENABLE;
1738 	if (sc->sc_type < WM_T_82542_2_1)
1739 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1740 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1741 
1742 	/* power up chip */
1743 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1744 	    NULL)) && error != EOPNOTSUPP) {
1745 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1746 		return;
1747 	}
1748 
1749 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1750 
1751 	/* Allocation settings */
1752 	max_type = PCI_INTR_TYPE_MSIX;
1753 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1754 	counts[PCI_INTR_TYPE_MSI] = 1;
1755 	counts[PCI_INTR_TYPE_INTX] = 1;
1756 
1757 alloc_retry:
1758 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1759 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1760 		return;
1761 	}
1762 
1763 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1764 		error = wm_setup_msix(sc);
1765 		if (error) {
1766 			pci_intr_release(pc, sc->sc_intrs,
1767 			    counts[PCI_INTR_TYPE_MSIX]);
1768 
1769 			/* Setup for MSI: Disable MSI-X */
1770 			max_type = PCI_INTR_TYPE_MSI;
1771 			counts[PCI_INTR_TYPE_MSI] = 1;
1772 			counts[PCI_INTR_TYPE_INTX] = 1;
1773 			goto alloc_retry;
1774 		}
1775 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1776 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1777 		error = wm_setup_legacy(sc);
1778 		if (error) {
1779 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1780 			    counts[PCI_INTR_TYPE_MSI]);
1781 
1782 			/* The next try is for INTx: Disable MSI */
1783 			max_type = PCI_INTR_TYPE_INTX;
1784 			counts[PCI_INTR_TYPE_INTX] = 1;
1785 			goto alloc_retry;
1786 		}
1787 	} else {
1788 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1789 		error = wm_setup_legacy(sc);
1790 		if (error) {
1791 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1792 			    counts[PCI_INTR_TYPE_INTX]);
1793 			return;
1794 		}
1795 	}
1796 
1797 	/*
1798 	 * Check the function ID (unit number of the chip).
1799 	 */
1800 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1801 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1802 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1803 	    || (sc->sc_type == WM_T_82580)
1804 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1805 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1806 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1807 	else
1808 		sc->sc_funcid = 0;
1809 
1810 	/*
1811 	 * Determine a few things about the bus we're connected to.
1812 	 */
1813 	if (sc->sc_type < WM_T_82543) {
1814 		/* We don't really know the bus characteristics here. */
1815 		sc->sc_bus_speed = 33;
1816 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1817 		/*
1818 		 * CSA (Communication Streaming Architecture) is about as
1819 		 * fast as a 32-bit 66MHz PCI bus.
1820 		 */
1821 		sc->sc_flags |= WM_F_CSA;
1822 		sc->sc_bus_speed = 66;
1823 		aprint_verbose_dev(sc->sc_dev,
1824 		    "Communication Streaming Architecture\n");
1825 		if (sc->sc_type == WM_T_82547) {
1826 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1827 			callout_setfunc(&sc->sc_txfifo_ch,
1828 					wm_82547_txfifo_stall, sc);
1829 			aprint_verbose_dev(sc->sc_dev,
1830 			    "using 82547 Tx FIFO stall work-around\n");
1831 		}
1832 	} else if (sc->sc_type >= WM_T_82571) {
1833 		sc->sc_flags |= WM_F_PCIE;
1834 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1835 		    && (sc->sc_type != WM_T_ICH10)
1836 		    && (sc->sc_type != WM_T_PCH)
1837 		    && (sc->sc_type != WM_T_PCH2)
1838 		    && (sc->sc_type != WM_T_PCH_LPT)
1839 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1840 			/* ICH* and PCH* have no PCIe capability registers */
1841 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1842 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1843 				NULL) == 0)
1844 				aprint_error_dev(sc->sc_dev,
1845 				    "unable to find PCIe capability\n");
1846 		}
1847 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1848 	} else {
1849 		reg = CSR_READ(sc, WMREG_STATUS);
1850 		if (reg & STATUS_BUS64)
1851 			sc->sc_flags |= WM_F_BUS64;
1852 		if ((reg & STATUS_PCIX_MODE) != 0) {
1853 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1854 
1855 			sc->sc_flags |= WM_F_PCIX;
1856 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1857 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1858 				aprint_error_dev(sc->sc_dev,
1859 				    "unable to find PCIX capability\n");
1860 			else if (sc->sc_type != WM_T_82545_3 &&
1861 				 sc->sc_type != WM_T_82546_3) {
1862 				/*
1863 				 * Work around a problem caused by the BIOS
1864 				 * setting the max memory read byte count
1865 				 * incorrectly.
1866 				 */
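				/*
				 * Both fields encode 512 << n bytes: e.g.
				 * bytecnt 3 (4096) with maxb 2 (2048) gets
				 * clamped to 2048 below.
				 */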
1867 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1868 				    sc->sc_pcixe_capoff + PCIX_CMD);
1869 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1870 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1871 
1872 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1873 				    PCIX_CMD_BYTECNT_SHIFT;
1874 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1875 				    PCIX_STATUS_MAXB_SHIFT;
1876 				if (bytecnt > maxb) {
1877 					aprint_verbose_dev(sc->sc_dev,
1878 					    "resetting PCI-X MMRBC: %d -> %d\n",
1879 					    512 << bytecnt, 512 << maxb);
1880 					pcix_cmd = (pcix_cmd &
1881 					    ~PCIX_CMD_BYTECNT_MASK) |
1882 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1883 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1884 					    sc->sc_pcixe_capoff + PCIX_CMD,
1885 					    pcix_cmd);
1886 				}
1887 			}
1888 		}
1889 		/*
1890 		 * The quad port adapter is special; it has a PCIX-PCIX
1891 		 * bridge on the board, and can run the secondary bus at
1892 		 * a higher speed.
1893 		 */
1894 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1895 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1896 								      : 66;
1897 		} else if (sc->sc_flags & WM_F_PCIX) {
1898 			switch (reg & STATUS_PCIXSPD_MASK) {
1899 			case STATUS_PCIXSPD_50_66:
1900 				sc->sc_bus_speed = 66;
1901 				break;
1902 			case STATUS_PCIXSPD_66_100:
1903 				sc->sc_bus_speed = 100;
1904 				break;
1905 			case STATUS_PCIXSPD_100_133:
1906 				sc->sc_bus_speed = 133;
1907 				break;
1908 			default:
1909 				aprint_error_dev(sc->sc_dev,
1910 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1911 				    reg & STATUS_PCIXSPD_MASK);
1912 				sc->sc_bus_speed = 66;
1913 				break;
1914 			}
1915 		} else
1916 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1917 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1918 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1919 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1920 	}
1921 
1922 	/* clear interesting stat counters */
1923 	CSR_READ(sc, WMREG_COLC);
1924 	CSR_READ(sc, WMREG_RXERRC);
1925 
1926 	/* get PHY control from SMBus to PCIe */
1927 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
1928 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
1929 		wm_smbustopci(sc);
1930 
1931 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1932 	    || (sc->sc_type >= WM_T_ICH8))
1933 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1934 	if (sc->sc_type >= WM_T_ICH8)
1935 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1936 
1937 	/* Set PHY, NVM mutex related stuff */
1938 	switch (sc->sc_type) {
1939 	case WM_T_82542_2_0:
1940 	case WM_T_82542_2_1:
1941 	case WM_T_82543:
1942 	case WM_T_82544:
1943 		/* Microwire */
1944 		sc->sc_nvm_wordsize = 64;
1945 		sc->sc_nvm_addrbits = 6;
1946 		break;
1947 	case WM_T_82540:
1948 	case WM_T_82545:
1949 	case WM_T_82545_3:
1950 	case WM_T_82546:
1951 	case WM_T_82546_3:
1952 		/* Microwire */
1953 		reg = CSR_READ(sc, WMREG_EECD);
1954 		if (reg & EECD_EE_SIZE) {
1955 			sc->sc_nvm_wordsize = 256;
1956 			sc->sc_nvm_addrbits = 8;
1957 		} else {
1958 			sc->sc_nvm_wordsize = 64;
1959 			sc->sc_nvm_addrbits = 6;
1960 		}
1961 		sc->sc_flags |= WM_F_LOCK_EECD;
1962 		break;
1963 	case WM_T_82541:
1964 	case WM_T_82541_2:
1965 	case WM_T_82547:
1966 	case WM_T_82547_2:
1967 		sc->sc_flags |= WM_F_LOCK_EECD;
1968 		reg = CSR_READ(sc, WMREG_EECD);
1969 		if (reg & EECD_EE_TYPE) {
1970 			/* SPI */
1971 			sc->sc_flags |= WM_F_EEPROM_SPI;
1972 			wm_nvm_set_addrbits_size_eecd(sc);
1973 		} else {
1974 			/* Microwire */
1975 			if ((reg & EECD_EE_ABITS) != 0) {
1976 				sc->sc_nvm_wordsize = 256;
1977 				sc->sc_nvm_addrbits = 8;
1978 			} else {
1979 				sc->sc_nvm_wordsize = 64;
1980 				sc->sc_nvm_addrbits = 6;
1981 			}
1982 		}
1983 		break;
1984 	case WM_T_82571:
1985 	case WM_T_82572:
1986 		/* SPI */
1987 		sc->sc_flags |= WM_F_EEPROM_SPI;
1988 		wm_nvm_set_addrbits_size_eecd(sc);
1989 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
1990 		sc->phy.acquire = wm_get_swsm_semaphore;
1991 		sc->phy.release = wm_put_swsm_semaphore;
1992 		break;
1993 	case WM_T_82573:
1994 	case WM_T_82574:
1995 	case WM_T_82583:
1996 		if (sc->sc_type == WM_T_82573) {
1997 			sc->sc_flags |= WM_F_LOCK_SWSM;
1998 			sc->phy.acquire = wm_get_swsm_semaphore;
1999 			sc->phy.release = wm_put_swsm_semaphore;
2000 		} else {
2001 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
2002 			/* Both PHY and NVM use the same semaphore. */
2003 			sc->phy.acquire
2004 			    = wm_get_swfwhw_semaphore;
2005 			sc->phy.release
2006 			    = wm_put_swfwhw_semaphore;
2007 		}
2008 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2009 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2010 			sc->sc_nvm_wordsize = 2048;
2011 		} else {
2012 			/* SPI */
2013 			sc->sc_flags |= WM_F_EEPROM_SPI;
2014 			wm_nvm_set_addrbits_size_eecd(sc);
2015 		}
2016 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2017 		break;
2018 	case WM_T_82575:
2019 	case WM_T_82576:
2020 	case WM_T_82580:
2021 	case WM_T_I350:
2022 	case WM_T_I354:
2023 	case WM_T_80003:
2024 		/* SPI */
2025 		sc->sc_flags |= WM_F_EEPROM_SPI;
2026 		wm_nvm_set_addrbits_size_eecd(sc);
2027 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2028 		    | WM_F_LOCK_SWSM;
2029 		sc->phy.acquire = wm_get_phy_82575;
2030 		sc->phy.release = wm_put_phy_82575;
2031 		break;
2032 	case WM_T_ICH8:
2033 	case WM_T_ICH9:
2034 	case WM_T_ICH10:
2035 	case WM_T_PCH:
2036 	case WM_T_PCH2:
2037 	case WM_T_PCH_LPT:
2038 		/* FLASH */
2039 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2040 		sc->sc_nvm_wordsize = 2048;
2041 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2042 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2043 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2044 			aprint_error_dev(sc->sc_dev,
2045 			    "can't map FLASH registers\n");
2046 			goto out;
2047 		}
2048 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2049 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2050 		    ICH_FLASH_SECTOR_SIZE;
2051 		sc->sc_ich8_flash_bank_size =
2052 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2053 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2054 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2055 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
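		/*
		 * Illustrative example (assumed values; ICH flash sectors
		 * are 4KB): GFPREG base 0 and limit 0x1f describe 32
		 * sectors (128KB); halved for the two banks and converted
		 * to 16-bit words, that is 32768 words per bank.
		 */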
2056 		sc->sc_flashreg_offset = 0;
2057 		sc->phy.acquire = wm_get_swflag_ich8lan;
2058 		sc->phy.release = wm_put_swflag_ich8lan;
2059 		break;
2060 	case WM_T_PCH_SPT:
2061 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2062 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2063 		sc->sc_flasht = sc->sc_st;
2064 		sc->sc_flashh = sc->sc_sh;
2065 		sc->sc_ich8_flash_base = 0;
2066 		sc->sc_nvm_wordsize =
2067 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2068 			* NVM_SIZE_MULTIPLIER;
2069 		/* That is the size in bytes; we want words */
2070 		sc->sc_nvm_wordsize /= 2;
2071 		/* assume 2 banks */
2072 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2073 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
2075 		sc->phy.release = wm_put_swflag_ich8lan;
2076 		break;
2077 	case WM_T_I210:
2078 	case WM_T_I211:
2079 		if (wm_nvm_get_flash_presence_i210(sc)) {
2080 			wm_nvm_set_addrbits_size_eecd(sc);
2081 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2082 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2083 		} else {
2084 			sc->sc_nvm_wordsize = INVM_SIZE;
2085 			sc->sc_flags |= WM_F_EEPROM_INVM;
2086 		}
2087 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2088 		sc->phy.acquire = wm_get_phy_82575;
2089 		sc->phy.release = wm_put_phy_82575;
2090 		break;
2091 	default:
2092 		break;
2093 	}
2094 
2095 	/* Reset the chip to a known state. */
2096 	wm_reset(sc);
2097 
2098 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2099 	switch (sc->sc_type) {
2100 	case WM_T_82571:
2101 	case WM_T_82572:
2102 		reg = CSR_READ(sc, WMREG_SWSM2);
2103 		if ((reg & SWSM2_LOCK) == 0) {
2104 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2105 			force_clear_smbi = true;
2106 		} else
2107 			force_clear_smbi = false;
2108 		break;
2109 	case WM_T_82573:
2110 	case WM_T_82574:
2111 	case WM_T_82583:
2112 		force_clear_smbi = true;
2113 		break;
2114 	default:
2115 		force_clear_smbi = false;
2116 		break;
2117 	}
2118 	if (force_clear_smbi) {
2119 		reg = CSR_READ(sc, WMREG_SWSM);
2120 		if ((reg & SWSM_SMBI) != 0)
2121 			aprint_error_dev(sc->sc_dev,
2122 			    "Please update the Bootagent\n");
2123 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2124 	}
2125 
2126 	/*
2127 	 * Defer printing the EEPROM type until after verifying the checksum.
2128 	 * This allows the EEPROM type to be printed correctly in the case
2129 	 * that no EEPROM is attached.
2130 	 */
2131 	/*
2132 	 * Validate the EEPROM checksum. If the checksum fails, flag
2133 	 * this for later, so we can fail future reads from the EEPROM.
2134 	 */
2135 	if (wm_nvm_validate_checksum(sc)) {
2136 		/*
2137 		 * Retry the check, because some PCI-e parts fail the
2138 		 * first one due to the link being in a sleep state.
2139 		 */
2140 		if (wm_nvm_validate_checksum(sc))
2141 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2142 	}
2143 
2144 	/* Set device properties (macflags) */
2145 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2146 
2147 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2148 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2149 	else {
2150 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2151 		    sc->sc_nvm_wordsize);
2152 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2153 			aprint_verbose("iNVM");
2154 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2155 			aprint_verbose("FLASH(HW)");
2156 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2157 			aprint_verbose("FLASH");
2158 		else {
2159 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2160 				eetype = "SPI";
2161 			else
2162 				eetype = "MicroWire";
2163 			aprint_verbose("(%d address bits) %s EEPROM",
2164 			    sc->sc_nvm_addrbits, eetype);
2165 		}
2166 	}
2167 	wm_nvm_version(sc);
2168 	aprint_verbose("\n");
2169 
2170 	/* Check for I21[01] PLL workaround */
2171 	if (sc->sc_type == WM_T_I210)
2172 		sc->sc_flags |= WM_F_PLL_WA_I210;
2173 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2174 		/* NVM image release 3.25 has a workaround */
2175 		if ((sc->sc_nvm_ver_major < 3)
2176 		    || ((sc->sc_nvm_ver_major == 3)
2177 			&& (sc->sc_nvm_ver_minor < 25))) {
2178 			aprint_verbose_dev(sc->sc_dev,
2179 			    "ROM image version %d.%d is older than 3.25\n",
2180 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2181 			sc->sc_flags |= WM_F_PLL_WA_I210;
2182 		}
2183 	}
2184 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2185 		wm_pll_workaround_i210(sc);
2186 
2187 	wm_get_wakeup(sc);
2188 	switch (sc->sc_type) {
2189 	case WM_T_82571:
2190 	case WM_T_82572:
2191 	case WM_T_82573:
2192 	case WM_T_82574:
2193 	case WM_T_82583:
2194 	case WM_T_80003:
2195 	case WM_T_ICH8:
2196 	case WM_T_ICH9:
2197 	case WM_T_ICH10:
2198 	case WM_T_PCH:
2199 	case WM_T_PCH2:
2200 	case WM_T_PCH_LPT:
2201 	case WM_T_PCH_SPT:
2202 		/* Non-AMT based hardware can now take control from firmware */
2203 		if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2204 			wm_get_hw_control(sc);
2205 		break;
2206 	default:
2207 		break;
2208 	}
2209 
2210 	/*
2211 	 * Read the Ethernet address from the EEPROM, unless it was
2212 	 * already found in the device properties.
2213 	 */
2214 	ea = prop_dictionary_get(dict, "mac-address");
2215 	if (ea != NULL) {
2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2219 	} else {
2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2221 			aprint_error_dev(sc->sc_dev,
2222 			    "unable to read Ethernet address\n");
2223 			goto out;
2224 		}
2225 	}
2226 
2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2228 	    ether_sprintf(enaddr));
2229 
2230 	/*
2231 	 * Read the config info from the EEPROM, and set up various
2232 	 * bits in the control registers based on their contents.
2233 	 */
2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2235 	if (pn != NULL) {
2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2238 	} else {
2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2241 			goto out;
2242 		}
2243 	}
2244 
2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2246 	if (pn != NULL) {
2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2249 	} else {
2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2252 			goto out;
2253 		}
2254 	}
2255 
2256 	/* check for WM_F_WOL */
2257 	switch (sc->sc_type) {
2258 	case WM_T_82542_2_0:
2259 	case WM_T_82542_2_1:
2260 	case WM_T_82543:
2261 		/* dummy? */
2262 		eeprom_data = 0;
2263 		apme_mask = NVM_CFG3_APME;
2264 		break;
2265 	case WM_T_82544:
2266 		apme_mask = NVM_CFG2_82544_APM_EN;
2267 		eeprom_data = cfg2;
2268 		break;
2269 	case WM_T_82546:
2270 	case WM_T_82546_3:
2271 	case WM_T_82571:
2272 	case WM_T_82572:
2273 	case WM_T_82573:
2274 	case WM_T_82574:
2275 	case WM_T_82583:
2276 	case WM_T_80003:
2277 	default:
2278 		apme_mask = NVM_CFG3_APME;
2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2281 		break;
2282 	case WM_T_82575:
2283 	case WM_T_82576:
2284 	case WM_T_82580:
2285 	case WM_T_I350:
2286 	case WM_T_I354: /* XXX ok? */
2287 	case WM_T_ICH8:
2288 	case WM_T_ICH9:
2289 	case WM_T_ICH10:
2290 	case WM_T_PCH:
2291 	case WM_T_PCH2:
2292 	case WM_T_PCH_LPT:
2293 	case WM_T_PCH_SPT:
2294 		/* XXX The funcid should be checked on some devices */
2295 		apme_mask = WUC_APME;
2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2297 		break;
2298 	}
2299 
2300 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2301 	if ((eeprom_data & apme_mask) != 0)
2302 		sc->sc_flags |= WM_F_WOL;
2303 #ifdef WM_DEBUG
2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
2305 		printf("WOL\n");
2306 #endif
2307 
2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2309 		/* Check NVM for autonegotiation */
2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2313 		}
2314 	}
2315 
2316 	/*
2317 	 * XXX need special handling for some multiple-port cards
2318 	 * to disable a particular port.
2319 	 */
2320 
2321 	if (sc->sc_type >= WM_T_82544) {
2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2323 		if (pn != NULL) {
2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
2326 		} else {
2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2328 				aprint_error_dev(sc->sc_dev,
2329 				    "unable to read SWDPIN\n");
2330 				goto out;
2331 			}
2332 		}
2333 	}
2334 
2335 	if (cfg1 & NVM_CFG1_ILOS)
2336 		sc->sc_ctrl |= CTRL_ILOS;
2337 
2338 	/*
2339 	 * XXX
2340 	 * This code isn't correct because pins 2 and 3 are located
2341 	 * at different positions on newer chips. Check all datasheets.
2342 	 *
2343 	 * Until this problem is resolved, only do it for chips < 82580.
2344 	 */
2345 	if (sc->sc_type <= WM_T_82580) {
2346 		if (sc->sc_type >= WM_T_82544) {
2347 			sc->sc_ctrl |=
2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2349 			    CTRL_SWDPIO_SHIFT;
2350 			sc->sc_ctrl |=
2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2352 			    CTRL_SWDPINS_SHIFT;
2353 		} else {
2354 			sc->sc_ctrl |=
2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2356 			    CTRL_SWDPIO_SHIFT;
2357 		}
2358 	}
2359 
2360 	/* XXX For other than 82580? */
2361 	if (sc->sc_type == WM_T_82580) {
2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2363 		if (nvmword & __BIT(13))
2364 			sc->sc_ctrl |= CTRL_ILOS;
2365 	}
2366 
2367 #if 0
2368 	if (sc->sc_type >= WM_T_82544) {
2369 		if (cfg1 & NVM_CFG1_IPS0)
2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2371 		if (cfg1 & NVM_CFG1_IPS1)
2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2373 		sc->sc_ctrl_ext |=
2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2375 		    CTRL_EXT_SWDPIO_SHIFT;
2376 		sc->sc_ctrl_ext |=
2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2378 		    CTRL_EXT_SWDPINS_SHIFT;
2379 	} else {
2380 		sc->sc_ctrl_ext |=
2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2382 		    CTRL_EXT_SWDPIO_SHIFT;
2383 	}
2384 #endif
2385 
2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2387 #if 0
2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2389 #endif
2390 
2391 	if (sc->sc_type == WM_T_PCH) {
2392 		uint16_t val;
2393 
2394 		/* Save the NVM K1 bit setting */
2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2396 
2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2398 			sc->sc_nvm_k1_enabled = 1;
2399 		else
2400 			sc->sc_nvm_k1_enabled = 0;
2401 	}
2402 
2403 	/*
2404 	 * Determine if we're in TBI, GMII or SGMII mode, and initialize
2405 	 * the media structures accordingly.
2406 	 */
2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
2414 	} else if (sc->sc_type < WM_T_82543 ||
2415 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2416 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2417 			aprint_error_dev(sc->sc_dev,
2418 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2419 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2420 		}
2421 		wm_tbi_mediainit(sc);
2422 	} else {
2423 		switch (sc->sc_type) {
2424 		case WM_T_82575:
2425 		case WM_T_82576:
2426 		case WM_T_82580:
2427 		case WM_T_I350:
2428 		case WM_T_I354:
2429 		case WM_T_I210:
2430 		case WM_T_I211:
2431 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
2432 			link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2433 			switch (link_mode) {
2434 			case CTRL_EXT_LINK_MODE_1000KX:
2435 				aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2436 				sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2437 				break;
2438 			case CTRL_EXT_LINK_MODE_SGMII:
2439 				if (wm_sgmii_uses_mdio(sc)) {
2440 					aprint_verbose_dev(sc->sc_dev,
2441 					    "SGMII(MDIO)\n");
2442 					sc->sc_flags |= WM_F_SGMII;
2443 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2444 					break;
2445 				}
2446 				aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2447 				/*FALLTHROUGH*/
2448 			case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2449 				sc->sc_mediatype = wm_sfp_get_media_type(sc);
2450 				if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2451 					if (link_mode
2452 					    == CTRL_EXT_LINK_MODE_SGMII) {
2453 						sc->sc_mediatype
2454 						    = WM_MEDIATYPE_COPPER;
2455 						sc->sc_flags |= WM_F_SGMII;
2456 					} else {
2457 						sc->sc_mediatype
2458 						    = WM_MEDIATYPE_SERDES;
2459 						aprint_verbose_dev(sc->sc_dev,
2460 						    "SERDES\n");
2461 					}
2462 					break;
2463 				}
2464 				if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2465 					aprint_verbose_dev(sc->sc_dev,
2466 					    "SERDES\n");
2467 
2468 				/* Change current link mode setting */
2469 				reg &= ~CTRL_EXT_LINK_MODE_MASK;
2470 				switch (sc->sc_mediatype) {
2471 				case WM_MEDIATYPE_COPPER:
2472 					reg |= CTRL_EXT_LINK_MODE_SGMII;
2473 					break;
2474 				case WM_MEDIATYPE_SERDES:
2475 					reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2476 					break;
2477 				default:
2478 					break;
2479 				}
2480 				CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2481 				break;
2482 			case CTRL_EXT_LINK_MODE_GMII:
2483 			default:
2484 				aprint_verbose_dev(sc->sc_dev, "Copper\n");
2485 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2486 				break;
2487 			}
2488 
2490 			if ((sc->sc_flags & WM_F_SGMII) != 0)
2491 				reg |= CTRL_EXT_I2C_ENA;
2492 			else
2493 				reg &= ~CTRL_EXT_I2C_ENA;
2494 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2495 
2496 			if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2497 				wm_gmii_mediainit(sc, wmp->wmp_product);
2498 			else
2499 				wm_tbi_mediainit(sc);
2500 			break;
2501 		default:
2502 			if (sc->sc_mediatype == WM_MEDIATYPE_FIBER)
2503 				aprint_error_dev(sc->sc_dev,
2504 				    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2505 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2506 			wm_gmii_mediainit(sc, wmp->wmp_product);
2507 		}
2508 	}
2509 
2510 	ifp = &sc->sc_ethercom.ec_if;
2511 	xname = device_xname(sc->sc_dev);
2512 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2513 	ifp->if_softc = sc;
2514 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2515 	ifp->if_extflags = IFEF_START_MPSAFE;
2516 	ifp->if_ioctl = wm_ioctl;
2517 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2518 		ifp->if_start = wm_nq_start;
2519 		if (sc->sc_nqueues > 1)
2520 			ifp->if_transmit = wm_nq_transmit;
2521 	} else
2522 		ifp->if_start = wm_start;
2523 	ifp->if_watchdog = wm_watchdog;
2524 	ifp->if_init = wm_init;
2525 	ifp->if_stop = wm_stop;
2526 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2527 	IFQ_SET_READY(&ifp->if_snd);
2528 
2529 	/* Check for jumbo frame */
2530 	switch (sc->sc_type) {
2531 	case WM_T_82573:
2532 		/* XXX limited to 9234 if ASPM is disabled */
2533 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2534 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2535 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2536 		break;
2537 	case WM_T_82571:
2538 	case WM_T_82572:
2539 	case WM_T_82574:
2540 	case WM_T_82575:
2541 	case WM_T_82576:
2542 	case WM_T_82580:
2543 	case WM_T_I350:
2544 	case WM_T_I354: /* XXXX ok? */
2545 	case WM_T_I210:
2546 	case WM_T_I211:
2547 	case WM_T_80003:
2548 	case WM_T_ICH9:
2549 	case WM_T_ICH10:
2550 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2551 	case WM_T_PCH_LPT:
2552 	case WM_T_PCH_SPT:
2553 		/* XXX limited to 9234 */
2554 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2555 		break;
2556 	case WM_T_PCH:
2557 		/* XXX limited to 4096 */
2558 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2559 		break;
2560 	case WM_T_82542_2_0:
2561 	case WM_T_82542_2_1:
2562 	case WM_T_82583:
2563 	case WM_T_ICH8:
2564 		/* No support for jumbo frame */
2565 		break;
2566 	default:
2567 		/* ETHER_MAX_LEN_JUMBO */
2568 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2569 		break;
2570 	}
2571 
2572 	/* If we're an i82543 or greater, we can support VLANs. */
2573 	if (sc->sc_type >= WM_T_82543)
2574 		sc->sc_ethercom.ec_capabilities |=
2575 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2576 
2577 	/*
2578 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2579 	 * on i82543 and later.
2580 	 */
2581 	if (sc->sc_type >= WM_T_82543) {
2582 		ifp->if_capabilities |=
2583 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2584 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2585 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2586 		    IFCAP_CSUM_TCPv6_Tx |
2587 		    IFCAP_CSUM_UDPv6_Tx;
2588 	}
2589 
2590 	/*
2591 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2592 	 *
2593 	 *	82541GI (8086:1076) ... no
2594 	 *	82572EI (8086:10b9) ... yes
2595 	 */
2596 	if (sc->sc_type >= WM_T_82571) {
2597 		ifp->if_capabilities |=
2598 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2599 	}
2600 
2601 	/*
2602 	 * If we're an i82544 or greater (except i82547), we can do
2603 	 * TCP segmentation offload.
2604 	 */
2605 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2606 		ifp->if_capabilities |= IFCAP_TSOv4;
2607 	}
2608 
2609 	if (sc->sc_type >= WM_T_82571) {
2610 		ifp->if_capabilities |= IFCAP_TSOv6;
2611 	}
2612 
2613 #ifdef WM_MPSAFE
2614 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2615 #else
2616 	sc->sc_core_lock = NULL;
2617 #endif
2618 
2619 	/* Attach the interface. */
2620 	if_initialize(ifp);
2621 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2622 	ether_ifattach(ifp, enaddr);
2623 	if_register(ifp);
2624 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2625 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2626 			  RND_FLAG_DEFAULT);
2627 
2628 #ifdef WM_EVENT_COUNTERS
2629 	/* Attach event counters. */
2630 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2631 	    NULL, xname, "linkintr");
2632 
2633 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2634 	    NULL, xname, "tx_xoff");
2635 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2636 	    NULL, xname, "tx_xon");
2637 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2638 	    NULL, xname, "rx_xoff");
2639 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2640 	    NULL, xname, "rx_xon");
2641 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2642 	    NULL, xname, "rx_macctl");
2643 #endif /* WM_EVENT_COUNTERS */
2644 
2645 	if (pmf_device_register(self, wm_suspend, wm_resume))
2646 		pmf_class_network_register(self, ifp);
2647 	else
2648 		aprint_error_dev(self, "couldn't establish power handler\n");
2649 
2650 	sc->sc_flags |= WM_F_ATTACHED;
2651  out:
2652 	return;
2653 }
2654 
2655 /* The detach function (ca_detach) */
2656 static int
2657 wm_detach(device_t self, int flags __unused)
2658 {
2659 	struct wm_softc *sc = device_private(self);
2660 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2661 	int i;
2662 
2663 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2664 		return 0;
2665 
2666 	/* Stop the interface; its callouts are stopped as well. */
2667 	wm_stop(ifp, 1);
2668 
2669 	pmf_device_deregister(self);
2670 
2671 	/* Tell the firmware about the release */
2672 	WM_CORE_LOCK(sc);
2673 	wm_release_manageability(sc);
2674 	wm_release_hw_control(sc);
2675 	WM_CORE_UNLOCK(sc);
2676 
2677 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2678 
2679 	/* Delete all remaining media. */
2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2681 
2682 	ether_ifdetach(ifp);
2683 	if_detach(ifp);
2684 	if_percpuq_destroy(sc->sc_ipq);
2685 
2686 	/* Unload RX dmamaps and free mbufs */
2687 	for (i = 0; i < sc->sc_nqueues; i++) {
2688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2689 		mutex_enter(rxq->rxq_lock);
2690 		wm_rxdrain(rxq);
2691 		mutex_exit(rxq->rxq_lock);
2692 	}
2693 	/* Must unlock here */
2694 
2695 	/* Disestablish the interrupt handler */
2696 	for (i = 0; i < sc->sc_nintrs; i++) {
2697 		if (sc->sc_ihs[i] != NULL) {
2698 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2699 			sc->sc_ihs[i] = NULL;
2700 		}
2701 	}
2702 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2703 
2704 	wm_free_txrx_queues(sc);
2705 
2706 	/* Unmap the registers */
2707 	if (sc->sc_ss) {
2708 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2709 		sc->sc_ss = 0;
2710 	}
2711 	if (sc->sc_ios) {
2712 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2713 		sc->sc_ios = 0;
2714 	}
2715 	if (sc->sc_flashs) {
2716 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2717 		sc->sc_flashs = 0;
2718 	}
2719 
2720 	if (sc->sc_core_lock)
2721 		mutex_obj_free(sc->sc_core_lock);
2722 	if (sc->sc_ich_phymtx)
2723 		mutex_obj_free(sc->sc_ich_phymtx);
2724 	if (sc->sc_ich_nvmmtx)
2725 		mutex_obj_free(sc->sc_ich_nvmmtx);
2726 
2727 	return 0;
2728 }
2729 
2730 static bool
2731 wm_suspend(device_t self, const pmf_qual_t *qual)
2732 {
2733 	struct wm_softc *sc = device_private(self);
2734 
2735 	wm_release_manageability(sc);
2736 	wm_release_hw_control(sc);
2737 #ifdef WM_WOL
2738 	wm_enable_wakeup(sc);
2739 #endif
2740 
2741 	return true;
2742 }
2743 
2744 static bool
2745 wm_resume(device_t self, const pmf_qual_t *qual)
2746 {
2747 	struct wm_softc *sc = device_private(self);
2748 
2749 	wm_init_manageability(sc);
2750 
2751 	return true;
2752 }
2753 
2754 /*
2755  * wm_watchdog:		[ifnet interface function]
2756  *
2757  *	Watchdog timer handler.
2758  */
2759 static void
2760 wm_watchdog(struct ifnet *ifp)
2761 {
2762 	int qid;
2763 	struct wm_softc *sc = ifp->if_softc;
2764 
2765 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2766 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2767 
2768 		wm_watchdog_txq(ifp, txq);
2769 	}
2770 
2771 	/* Reset the interface. */
2772 	(void) wm_init(ifp);
2773 
2774 	/*
2775 	 * Some upper-layer processing (e.g. ALTQ) may still call
2776 	 * ifp->if_start() directly.
2777 	 */
2778 	/* Try to get more packets going. */
2779 	ifp->if_start(ifp);
2780 }
2781 
2782 static void
2783 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2784 {
2785 	struct wm_softc *sc = ifp->if_softc;
2786 
2787 	/*
2788 	 * Since we're using delayed interrupts, sweep up
2789 	 * before we report an error.
2790 	 */
2791 	mutex_enter(txq->txq_lock);
2792 	wm_txeof(sc, txq);
2793 	mutex_exit(txq->txq_lock);
2794 
2795 	if (txq->txq_free != WM_NTXDESC(txq)) {
2796 #ifdef WM_DEBUG
2797 		int i, j;
2798 		struct wm_txsoft *txs;
2799 #endif
2800 		log(LOG_ERR,
2801 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2802 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2803 		    txq->txq_next);
2804 		ifp->if_oerrors++;
2805 #ifdef WM_DEBUG
2806 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
2807 		    i = WM_NEXTTXS(txq, i)) {
2808 		    txs = &txq->txq_soft[i];
2809 		    printf("txs %d tx %d -> %d\n",
2810 			i, txs->txs_firstdesc, txs->txs_lastdesc);
2811 		    for (j = txs->txs_firstdesc; ;
2812 			j = WM_NEXTTX(txq, j)) {
2813 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2814 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2815 			printf("\t %#08x%08x\n",
2816 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2817 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2818 			if (j == txs->txs_lastdesc)
2819 				break;
2820 			}
2821 		}
2822 #endif
2823 	}
2824 }
2825 
2826 /*
2827  * wm_tick:
2828  *
2829  *	One second timer, used to check link status, sweep up
2830  *	completed transmit jobs, etc.
2831  */
2832 static void
2833 wm_tick(void *arg)
2834 {
2835 	struct wm_softc *sc = arg;
2836 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2837 #ifndef WM_MPSAFE
2838 	int s = splnet();
2839 #endif
2840 
2841 	WM_CORE_LOCK(sc);
2842 
2843 	if (sc->sc_stopping)
2844 		goto out;
2845 
2846 	if (sc->sc_type >= WM_T_82542_2_1) {
2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2850 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2851 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2852 	}
2853 
2854 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2855 	ifp->if_ierrors += 0ULL /* ensure quad_t */
2856 	    + CSR_READ(sc, WMREG_CRCERRS)
2857 	    + CSR_READ(sc, WMREG_ALGNERRC)
2858 	    + CSR_READ(sc, WMREG_SYMERRC)
2859 	    + CSR_READ(sc, WMREG_RXERRC)
2860 	    + CSR_READ(sc, WMREG_SEC)
2861 	    + CSR_READ(sc, WMREG_CEXTERR)
2862 	    + CSR_READ(sc, WMREG_RLEC);
2863 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC) + CSR_READ(sc, WMREG_RNBC);
2864 
2865 	if (sc->sc_flags & WM_F_HAS_MII)
2866 		mii_tick(&sc->sc_mii);
2867 	else if ((sc->sc_type >= WM_T_82575)
2868 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2869 		wm_serdes_tick(sc);
2870 	else
2871 		wm_tbi_tick(sc);
2872 
2873 out:
2874 	WM_CORE_UNLOCK(sc);
2875 #ifndef WM_MPSAFE
2876 	splx(s);
2877 #endif
2878 
2879 	if (!sc->sc_stopping)
2880 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2881 }
2882 
2883 static int
2884 wm_ifflags_cb(struct ethercom *ec)
2885 {
2886 	struct ifnet *ifp = &ec->ec_if;
2887 	struct wm_softc *sc = ifp->if_softc;
2888 	int rc = 0;
2889 
2890 	WM_CORE_LOCK(sc);
2891 
2892 	int change = ifp->if_flags ^ sc->sc_if_flags;
2893 	sc->sc_if_flags = ifp->if_flags;
2894 
2895 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2896 		rc = ENETRESET;
2897 		goto out;
2898 	}
2899 
2900 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2901 		wm_set_filter(sc);
2902 
2903 	wm_set_vlan(sc);
2904 
2905 out:
2906 	WM_CORE_UNLOCK(sc);
2907 
2908 	return rc;
2909 }
2910 
2911 /*
2912  * wm_ioctl:		[ifnet interface function]
2913  *
2914  *	Handle control requests from the operator.
2915  */
2916 static int
2917 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2918 {
2919 	struct wm_softc *sc = ifp->if_softc;
2920 	struct ifreq *ifr = (struct ifreq *) data;
2921 	struct ifaddr *ifa = (struct ifaddr *)data;
2922 	struct sockaddr_dl *sdl;
2923 	int s, error;
2924 
2925 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2926 		device_xname(sc->sc_dev), __func__));
2927 
2928 #ifndef WM_MPSAFE
2929 	s = splnet();
2930 #endif
2931 	switch (cmd) {
2932 	case SIOCSIFMEDIA:
2933 	case SIOCGIFMEDIA:
2934 		WM_CORE_LOCK(sc);
2935 		/* Flow control requires full-duplex mode. */
2936 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2937 		    (ifr->ifr_media & IFM_FDX) == 0)
2938 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2939 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2940 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2941 				/* We can do both TXPAUSE and RXPAUSE. */
2942 				ifr->ifr_media |=
2943 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2944 			}
2945 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2946 		}
2947 		WM_CORE_UNLOCK(sc);
2948 #ifdef WM_MPSAFE
2949 		s = splnet();
2950 #endif
2951 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2952 #ifdef WM_MPSAFE
2953 		splx(s);
2954 #endif
2955 		break;
2956 	case SIOCINITIFADDR:
2957 		WM_CORE_LOCK(sc);
2958 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2959 			sdl = satosdl(ifp->if_dl->ifa_addr);
2960 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2961 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2962 			/* unicast address is first multicast entry */
2963 			wm_set_filter(sc);
2964 			error = 0;
2965 			WM_CORE_UNLOCK(sc);
2966 			break;
2967 		}
2968 		WM_CORE_UNLOCK(sc);
2969 		/*FALLTHROUGH*/
2970 	default:
2971 #ifdef WM_MPSAFE
2972 		s = splnet();
2973 #endif
2974 		/* It may call wm_start, so unlock here */
2975 		error = ether_ioctl(ifp, cmd, data);
2976 #ifdef WM_MPSAFE
2977 		splx(s);
2978 #endif
2979 		if (error != ENETRESET)
2980 			break;
2981 
2982 		error = 0;
2983 
2984 		if (cmd == SIOCSIFCAP) {
2985 			error = (*ifp->if_init)(ifp);
2986 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2987 			;
2988 		else if (ifp->if_flags & IFF_RUNNING) {
2989 			/*
2990 			 * Multicast list has changed; set the hardware filter
2991 			 * accordingly.
2992 			 */
2993 			WM_CORE_LOCK(sc);
2994 			wm_set_filter(sc);
2995 			WM_CORE_UNLOCK(sc);
2996 		}
2997 		break;
2998 	}
2999 
3000 #ifndef WM_MPSAFE
3001 	splx(s);
3002 #endif
3003 	return error;
3004 }
3005 
3006 /* MAC address related */
3007 
3008 /*
3009  * Get the offset of the MAC address and return it.
3010  * If an error occurs, use offset 0.
3011  */
3012 static uint16_t
3013 wm_check_alt_mac_addr(struct wm_softc *sc)
3014 {
3015 	uint16_t myea[ETHER_ADDR_LEN / 2];
3016 	uint16_t offset = NVM_OFF_MACADDR;
3017 
3018 	/* Try to read alternative MAC address pointer */
3019 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3020 		return 0;
3021 
3022 	/* Check whether the pointer is valid. */
3023 	if ((offset == 0x0000) || (offset == 0xffff))
3024 		return 0;
3025 
3026 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3027 	/*
3028 	 * Check whether the alternative MAC address is valid.  Some
3029 	 * cards have a non-0xffff pointer but don't actually use an
3030 	 * alternative MAC address.
3031 	 *
3032 	 * A valid address must have the multicast (I/G) bit clear.
3033 	 */
3034 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3035 		if (((myea[0] & 0xff) & 0x01) == 0)
3036 			return offset; /* Found */
3037 
3038 	/* Not found */
3039 	return 0;
3040 }
3041 
3042 static int
3043 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3044 {
3045 	uint16_t myea[ETHER_ADDR_LEN / 2];
3046 	uint16_t offset = NVM_OFF_MACADDR;
3047 	int do_invert = 0;
3048 
3049 	switch (sc->sc_type) {
3050 	case WM_T_82580:
3051 	case WM_T_I350:
3052 	case WM_T_I354:
3053 		/* EEPROM Top Level Partitioning */
3054 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3055 		break;
3056 	case WM_T_82571:
3057 	case WM_T_82575:
3058 	case WM_T_82576:
3059 	case WM_T_80003:
3060 	case WM_T_I210:
3061 	case WM_T_I211:
3062 		offset = wm_check_alt_mac_addr(sc);
3063 		if (offset == 0)
3064 			if ((sc->sc_funcid & 0x01) == 1)
3065 				do_invert = 1;
3066 		break;
3067 	default:
3068 		if ((sc->sc_funcid & 0x01) == 1)
3069 			do_invert = 1;
3070 		break;
3071 	}
3072 
3073 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3074 		goto bad;
3075 
3076 	enaddr[0] = myea[0] & 0xff;
3077 	enaddr[1] = myea[0] >> 8;
3078 	enaddr[2] = myea[1] & 0xff;
3079 	enaddr[3] = myea[1] >> 8;
3080 	enaddr[4] = myea[2] & 0xff;
3081 	enaddr[5] = myea[2] >> 8;
3082 
3083 	/*
3084 	 * Toggle the LSB of the MAC address on the second port
3085 	 * of some dual port cards.
3086 	 */
3087 	if (do_invert != 0)
3088 		enaddr[5] ^= 1;
3089 
3090 	return 0;
3091 
3092  bad:
3093 	return -1;
3094 }
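
/*
 * Example: myea[] = { 0x3500, 0x07e9, 0x12ab } unpacks to the
 * address 00:35:e9:07:ab:12; with do_invert set (second port of
 * some dual-port cards) the last byte becomes 0x13.
 */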
3095 
3096 /*
3097  * wm_set_ral:
3098  *
3099  *	Set an entry in the receive address list.
3100  */
3101 static void
3102 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3103 {
3104 	uint32_t ral_lo, ral_hi;
3105 
3106 	if (enaddr != NULL) {
3107 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3108 		    (enaddr[3] << 24);
3109 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3110 		ral_hi |= RAL_AV;
3111 	} else {
3112 		ral_lo = 0;
3113 		ral_hi = 0;
3114 	}
3115 
3116 	if (sc->sc_type >= WM_T_82544) {
3117 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3118 		    ral_lo);
3119 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3120 		    ral_hi);
3121 	} else {
3122 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3123 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3124 	}
3125 }
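
/*
 * Example: for 00:11:22:33:44:55, ral_lo is 0x33221100 and ral_hi
 * is 0x5544 | RAL_AV (the address-valid bit).
 */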
3126 
3127 /*
3128  * wm_mchash:
3129  *
3130  *	Compute the hash of the multicast address for the 4096-bit
3131  *	multicast filter.
3132  */
3133 static uint32_t
3134 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3135 {
3136 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3137 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3138 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3139 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3140 	uint32_t hash;
3141 
3142 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3143 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3144 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3145 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3146 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3147 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3148 		return (hash & 0x3ff);
3149 	}
3150 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3151 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3152 
3153 	return (hash & 0xfff);
3154 }
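
/*
 * Worked example for the non-ICH case with sc_mchash_type 0
 * (lo_shift 4, hi_shift 8): for 01:00:5e:00:00:01 the hash is
 * (0x00 >> 4) | (0x01 << 8) = 0x100, which wm_set_filter() below
 * turns into bit 0 (0x100 & 0x1f) of MTA word 8 (0x100 >> 5).
 */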
3155 
3156 /*
3157  * wm_set_filter:
3158  *
3159  *	Set up the receive filter.
3160  */
3161 static void
3162 wm_set_filter(struct wm_softc *sc)
3163 {
3164 	struct ethercom *ec = &sc->sc_ethercom;
3165 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3166 	struct ether_multi *enm;
3167 	struct ether_multistep step;
3168 	bus_addr_t mta_reg;
3169 	uint32_t hash, reg, bit;
3170 	int i, size, ralmax;
3171 
3172 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3173 		device_xname(sc->sc_dev), __func__));
3174 
3175 	if (sc->sc_type >= WM_T_82544)
3176 		mta_reg = WMREG_CORDOVA_MTA;
3177 	else
3178 		mta_reg = WMREG_MTA;
3179 
3180 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3181 
3182 	if (ifp->if_flags & IFF_BROADCAST)
3183 		sc->sc_rctl |= RCTL_BAM;
3184 	if (ifp->if_flags & IFF_PROMISC) {
3185 		sc->sc_rctl |= RCTL_UPE;
3186 		goto allmulti;
3187 	}
3188 
3189 	/*
3190 	 * Set the station address in the first RAL slot, and
3191 	 * clear the remaining slots.
3192 	 */
3193 	if (sc->sc_type == WM_T_ICH8)
3194 		size = WM_RAL_TABSIZE_ICH8 - 1;
3195 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3196 	    || (sc->sc_type == WM_T_PCH))
3197 		size = WM_RAL_TABSIZE_ICH8;
3198 	else if (sc->sc_type == WM_T_PCH2)
3199 		size = WM_RAL_TABSIZE_PCH2;
3200 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
3201 		size = WM_RAL_TABSIZE_PCH_LPT;
3202 	else if (sc->sc_type == WM_T_82575)
3203 		size = WM_RAL_TABSIZE_82575;
3204 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3205 		size = WM_RAL_TABSIZE_82576;
3206 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3207 		size = WM_RAL_TABSIZE_I350;
3208 	else
3209 		size = WM_RAL_TABSIZE;
3210 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3211 
3212 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3213 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3214 		switch (i) {
3215 		case 0:
3216 			/* We can use all entries */
3217 			ralmax = size;
3218 			break;
3219 		case 1:
3220 			/* Only RAR[0] */
3221 			ralmax = 1;
3222 			break;
3223 		default:
3224 			/* available SHRA + RAR[0] */
3225 			ralmax = i + 1;
3226 		}
3227 	} else
3228 		ralmax = size;
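	/*
	 * Editorial comment: clear RAL entries 1..ralmax-1 only; on
	 * PCH_LPT/PCH_SPT, entries at or above ralmax appear to be
	 * firmware-owned (FWSM WLOCK_MAC) and are left untouched.
	 */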
3229 	for (i = 1; i < size; i++) {
3230 		if (i < ralmax)
3231 			wm_set_ral(sc, NULL, i);
3232 	}
3233 
3234 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3235 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3236 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3237 	    || (sc->sc_type == WM_T_PCH_SPT))
3238 		size = WM_ICH8_MC_TABSIZE;
3239 	else
3240 		size = WM_MC_TABSIZE;
3241 	/* Clear out the multicast table. */
3242 	for (i = 0; i < size; i++)
3243 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3244 
3245 	ETHER_FIRST_MULTI(step, ec, enm);
3246 	while (enm != NULL) {
3247 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3248 			/*
3249 			 * We must listen to a range of multicast addresses.
3250 			 * For now, just accept all multicasts, rather than
3251 			 * trying to set only those filter bits needed to match
3252 			 * the range.  (At this time, the only use of address
3253 			 * ranges is for IP multicast routing, for which the
3254 			 * range is big enough to require all bits set.)
3255 			 */
3256 			goto allmulti;
3257 		}
3258 
3259 		hash = wm_mchash(sc, enm->enm_addrlo);
3260 
3261 		reg = (hash >> 5);
3262 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3263 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3264 		    || (sc->sc_type == WM_T_PCH2)
3265 		    || (sc->sc_type == WM_T_PCH_LPT)
3266 		    || (sc->sc_type == WM_T_PCH_SPT))
3267 			reg &= 0x1f;
3268 		else
3269 			reg &= 0x7f;
3270 		bit = hash & 0x1f;
3271 
3272 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3273 		hash |= 1U << bit;
3274 
3275 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3276 			/*
3277 			 * 82544 Errata 9: Certain registers cannot be written
3278 			 * with particular alignments in PCI-X bus operation
3279 			 * (FCAH, MTA and VFTA).
3280 			 */
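			/*
			 * Editorial note: the workaround below re-writes the
			 * preceding (even-aligned) MTA register after the odd
			 * one; "bit" is reused as scratch for its old value.
			 */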
3281 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3282 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3283 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3284 		} else
3285 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3286 
3287 		ETHER_NEXT_MULTI(step, enm);
3288 	}
3289 
3290 	ifp->if_flags &= ~IFF_ALLMULTI;
3291 	goto setit;
3292 
3293  allmulti:
3294 	ifp->if_flags |= IFF_ALLMULTI;
3295 	sc->sc_rctl |= RCTL_MPE;
3296 
3297  setit:
3298 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3299 }
3300 
3301 /* Reset and init related */
3302 
3303 static void
3304 wm_set_vlan(struct wm_softc *sc)
3305 {
3306 
3307 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3308 		device_xname(sc->sc_dev), __func__));
3309 
3310 	/* Deal with VLAN enables. */
3311 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3312 		sc->sc_ctrl |= CTRL_VME;
3313 	else
3314 		sc->sc_ctrl &= ~CTRL_VME;
3315 
3316 	/* Write the control registers. */
3317 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3318 }
3319 
3320 static void
3321 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3322 {
3323 	uint32_t gcr;
3324 	pcireg_t ctrl2;
3325 
3326 	gcr = CSR_READ(sc, WMREG_GCR);
3327 
3328 	/* Only take action if timeout value is defaulted to 0 */
3329 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3330 		goto out;
3331 
3332 	if ((gcr & GCR_CAP_VER2) == 0) {
3333 		gcr |= GCR_CMPL_TMOUT_10MS;
3334 		goto out;
3335 	}
3336 
3337 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3338 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3339 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3340 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3341 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3342 
3343 out:
3344 	/* Disable completion timeout resend */
3345 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3346 
3347 	CSR_WRITE(sc, WMREG_GCR, gcr);
3348 }
3349 
3350 void
3351 wm_get_auto_rd_done(struct wm_softc *sc)
3352 {
3353 	int i;
3354 
3355 	/* wait for eeprom to reload */
3356 	switch (sc->sc_type) {
3357 	case WM_T_82571:
3358 	case WM_T_82572:
3359 	case WM_T_82573:
3360 	case WM_T_82574:
3361 	case WM_T_82583:
3362 	case WM_T_82575:
3363 	case WM_T_82576:
3364 	case WM_T_82580:
3365 	case WM_T_I350:
3366 	case WM_T_I354:
3367 	case WM_T_I210:
3368 	case WM_T_I211:
3369 	case WM_T_80003:
3370 	case WM_T_ICH8:
3371 	case WM_T_ICH9:
3372 		for (i = 0; i < 10; i++) {
3373 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3374 				break;
3375 			delay(1000);
3376 		}
3377 		if (i == 10) {
3378 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3379 			    "complete\n", device_xname(sc->sc_dev));
3380 		}
3381 		break;
3382 	default:
3383 		break;
3384 	}
3385 }
3386 
3387 void
3388 wm_lan_init_done(struct wm_softc *sc)
3389 {
3390 	uint32_t reg = 0;
3391 	int i;
3392 
3393 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3394 		device_xname(sc->sc_dev), __func__));
3395 
3396 	/* Wait for eeprom to reload */
3397 	switch (sc->sc_type) {
3398 	case WM_T_ICH10:
3399 	case WM_T_PCH:
3400 	case WM_T_PCH2:
3401 	case WM_T_PCH_LPT:
3402 	case WM_T_PCH_SPT:
3403 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3404 			reg = CSR_READ(sc, WMREG_STATUS);
3405 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3406 				break;
3407 			delay(100);
3408 		}
3409 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3410 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3411 			    "complete\n", device_xname(sc->sc_dev), __func__);
3412 		}
3413 		break;
3414 	default:
3415 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3416 		    __func__);
3417 		break;
3418 	}
3419 
3420 	reg &= ~STATUS_LAN_INIT_DONE;
3421 	CSR_WRITE(sc, WMREG_STATUS, reg);
3422 }
3423 
3424 void
3425 wm_get_cfg_done(struct wm_softc *sc)
3426 {
3427 	int mask;
3428 	uint32_t reg;
3429 	int i;
3430 
3431 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3432 		device_xname(sc->sc_dev), __func__));
3433 
3434 	/* Wait for eeprom to reload */
3435 	switch (sc->sc_type) {
3436 	case WM_T_82542_2_0:
3437 	case WM_T_82542_2_1:
3438 		/* null */
3439 		break;
3440 	case WM_T_82543:
3441 	case WM_T_82544:
3442 	case WM_T_82540:
3443 	case WM_T_82545:
3444 	case WM_T_82545_3:
3445 	case WM_T_82546:
3446 	case WM_T_82546_3:
3447 	case WM_T_82541:
3448 	case WM_T_82541_2:
3449 	case WM_T_82547:
3450 	case WM_T_82547_2:
3451 	case WM_T_82573:
3452 	case WM_T_82574:
3453 	case WM_T_82583:
3454 		/* generic */
3455 		delay(10*1000);
3456 		break;
3457 	case WM_T_80003:
3458 	case WM_T_82571:
3459 	case WM_T_82572:
3460 	case WM_T_82575:
3461 	case WM_T_82576:
3462 	case WM_T_82580:
3463 	case WM_T_I350:
3464 	case WM_T_I354:
3465 	case WM_T_I210:
3466 	case WM_T_I211:
3467 		if (sc->sc_type == WM_T_82571) {
3468 			/* Only 82571 shares port 0 */
3469 			mask = EEMNGCTL_CFGDONE_0;
3470 		} else
3471 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3472 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3473 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3474 				break;
3475 			delay(1000);
3476 		}
3477 		if (i >= WM_PHY_CFG_TIMEOUT) {
3478 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3479 				device_xname(sc->sc_dev), __func__));
3480 		}
3481 		break;
3482 	case WM_T_ICH8:
3483 	case WM_T_ICH9:
3484 	case WM_T_ICH10:
3485 	case WM_T_PCH:
3486 	case WM_T_PCH2:
3487 	case WM_T_PCH_LPT:
3488 	case WM_T_PCH_SPT:
3489 		delay(10*1000);
3490 		if (sc->sc_type >= WM_T_ICH10)
3491 			wm_lan_init_done(sc);
3492 		else
3493 			wm_get_auto_rd_done(sc);
3494 
3495 		reg = CSR_READ(sc, WMREG_STATUS);
3496 		if ((reg & STATUS_PHYRA) != 0)
3497 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3498 		break;
3499 	default:
3500 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3501 		    __func__);
3502 		break;
3503 	}
3504 }
3505 
3506 /* Init hardware bits */
3507 void
3508 wm_initialize_hardware_bits(struct wm_softc *sc)
3509 {
3510 	uint32_t tarc0, tarc1, reg;
3511 
3512 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3513 		device_xname(sc->sc_dev), __func__));
3514 
3515 	/* For 82571 variant, 80003 and ICHs */
3516 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3517 	    || (sc->sc_type >= WM_T_80003)) {
3518 
3519 		/* Transmit Descriptor Control 0 */
3520 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3521 		reg |= TXDCTL_COUNT_DESC;
3522 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3523 
3524 		/* Transmit Descriptor Control 1 */
3525 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3526 		reg |= TXDCTL_COUNT_DESC;
3527 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3528 
3529 		/* TARC0 */
3530 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3531 		switch (sc->sc_type) {
3532 		case WM_T_82571:
3533 		case WM_T_82572:
3534 		case WM_T_82573:
3535 		case WM_T_82574:
3536 		case WM_T_82583:
3537 		case WM_T_80003:
3538 			/* Clear bits 30..27 */
3539 			tarc0 &= ~__BITS(30, 27);
3540 			break;
3541 		default:
3542 			break;
3543 		}
3544 
3545 		switch (sc->sc_type) {
3546 		case WM_T_82571:
3547 		case WM_T_82572:
3548 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3549 
3550 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3551 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3552 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3553 			/* 8257[12] Errata No.7 */
3554 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
3555 
3556 			/* TARC1 bit 28 */
3557 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3558 				tarc1 &= ~__BIT(28);
3559 			else
3560 				tarc1 |= __BIT(28);
3561 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3562 
3563 			/*
3564 			 * 8257[12] Errata No.13
3565 			 * Disable Dynamic Clock Gating.
3566 			 */
3567 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3568 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3569 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3570 			break;
3571 		case WM_T_82573:
3572 		case WM_T_82574:
3573 		case WM_T_82583:
3574 			if ((sc->sc_type == WM_T_82574)
3575 			    || (sc->sc_type == WM_T_82583))
3576 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3577 
3578 			/* Extended Device Control */
3579 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3580 			reg &= ~__BIT(23);	/* Clear bit 23 */
3581 			reg |= __BIT(22);	/* Set bit 22 */
3582 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3583 
3584 			/* Device Control */
3585 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3586 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3587 
3588 			/* PCIe Control Register */
3589 			/*
3590 			 * 82573 Errata (unknown).
3591 			 *
3592 			 * 82574 Errata 25 and 82583 Errata 12
3593 			 * "Dropped Rx Packets":
3594 			 *   NVM image version 2.1.4 and newer does not have this bug.
3595 			 */
3596 			reg = CSR_READ(sc, WMREG_GCR);
3597 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3598 			CSR_WRITE(sc, WMREG_GCR, reg);
3599 
3600 			if ((sc->sc_type == WM_T_82574)
3601 			    || (sc->sc_type == WM_T_82583)) {
3602 				/*
3603 				 * Document says this bit must be set for
3604 				 * proper operation.
3605 				 */
3606 				reg = CSR_READ(sc, WMREG_GCR);
3607 				reg |= __BIT(22);
3608 				CSR_WRITE(sc, WMREG_GCR, reg);
3609 
3610 				/*
3611 				 * Apply a workaround for the hardware erratum
3612 				 * documented in the errata docs.  It fixes an
3613 				 * issue where error-prone or unreliable PCIe
3614 				 * completions can occur, particularly with
3615 				 * ASPM enabled.  Without the fix, the issue
3616 				 * can cause Tx timeouts.
3617 				 */
3618 				reg = CSR_READ(sc, WMREG_GCR2);
3619 				reg |= __BIT(0);
3620 				CSR_WRITE(sc, WMREG_GCR2, reg);
3621 			}
3622 			break;
3623 		case WM_T_80003:
3624 			/* TARC0 */
3625 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3626 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3627 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
3628 
3629 			/* TARC1 bit 28 */
3630 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3631 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3632 				tarc1 &= ~__BIT(28);
3633 			else
3634 				tarc1 |= __BIT(28);
3635 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3636 			break;
3637 		case WM_T_ICH8:
3638 		case WM_T_ICH9:
3639 		case WM_T_ICH10:
3640 		case WM_T_PCH:
3641 		case WM_T_PCH2:
3642 		case WM_T_PCH_LPT:
3643 		case WM_T_PCH_SPT:
3644 			/* TARC0 */
3645 			if ((sc->sc_type == WM_T_ICH8)
3646 			    || (sc->sc_type == WM_T_PCH_SPT)) {
3647 				/* Set TARC0 bits 29 and 28 */
3648 				tarc0 |= __BITS(29, 28);
3649 			}
3650 			/* Set TARC0 bits 23,24,26,27 */
3651 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3652 
3653 			/* CTRL_EXT */
3654 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3655 			reg |= __BIT(22);	/* Set bit 22 */
3656 			/*
3657 			 * Enable PHY low-power state when MAC is at D3
3658 			 * w/o WoL
3659 			 */
3660 			if (sc->sc_type >= WM_T_PCH)
3661 				reg |= CTRL_EXT_PHYPDEN;
3662 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3663 
3664 			/* TARC1 */
3665 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3666 			/* bit 28 */
3667 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3668 				tarc1 &= ~__BIT(28);
3669 			else
3670 				tarc1 |= __BIT(28);
3671 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3672 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3673 
3674 			/* Device Status */
3675 			if (sc->sc_type == WM_T_ICH8) {
3676 				reg = CSR_READ(sc, WMREG_STATUS);
3677 				reg &= ~__BIT(31);
3678 				CSR_WRITE(sc, WMREG_STATUS, reg);
3679 
3680 			}
3681 
3682 			/* IOSFPC */
3683 			if (sc->sc_type == WM_T_PCH_SPT) {
3684 				reg = CSR_READ(sc, WMREG_IOSFPC);
3685 				reg |= RCTL_RDMTS_HEX; /* XXX RCTL bit? */
3686 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
3687 			}
3688 			/*
3689 			 * To work around a descriptor data corruption issue
3690 			 * seen with NFSv2 UDP traffic, simply disable the NFS
3691 			 * filtering capability.
3692 			 */
3693 			reg = CSR_READ(sc, WMREG_RFCTL);
3694 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3695 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3696 			break;
3697 		default:
3698 			break;
3699 		}
3700 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3701 
3702 		/*
3703 		 * 8257[12] Errata No.52 and some others.
3704 		 * Avoid RSS Hash Value bug.
3705 		 */
3706 		switch (sc->sc_type) {
3707 		case WM_T_82571:
3708 		case WM_T_82572:
3709 		case WM_T_82573:
3710 		case WM_T_80003:
3711 		case WM_T_ICH8:
3712 			reg = CSR_READ(sc, WMREG_RFCTL);
3713 			reg |= WMREG_RFCTL_NEWIPV6EXDIS | WMREG_RFCTL_IPV6EXDIS;
3714 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3715 			break;
3716 		default:
3717 			break;
3718 		}
3719 	}
3720 }
3721 
3722 static uint32_t
3723 wm_rxpbs_adjust_82580(uint32_t val)
3724 {
3725 	uint32_t rv = 0;
3726 
3727 	if (val < __arraycount(wm_82580_rxpbs_table))
3728 		rv = wm_82580_rxpbs_table[val];
3729 
3730 	return rv;
3731 }
3732 
3733 /*
3734  * wm_reset:
3735  *
3736  *	Reset the i82542 chip.
3737  */
3738 static void
3739 wm_reset(struct wm_softc *sc)
3740 {
3741 	int phy_reset = 0;
3742 	int i, error = 0;
3743 	uint32_t reg;
3744 
3745 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3746 		device_xname(sc->sc_dev), __func__));
3747 	KASSERT(sc->sc_type != 0);
3748 
3749 	/*
3750 	 * Allocate on-chip memory according to the MTU size.
3751 	 * The Packet Buffer Allocation register must be written
3752 	 * before the chip is reset.
3753 	 */
3754 	switch (sc->sc_type) {
3755 	case WM_T_82547:
3756 	case WM_T_82547_2:
3757 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3758 		    PBA_22K : PBA_30K;
3759 		for (i = 0; i < sc->sc_nqueues; i++) {
3760 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3761 			txq->txq_fifo_head = 0;
3762 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3763 			txq->txq_fifo_size =
3764 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3765 			txq->txq_fifo_stall = 0;
3766 		}
3767 		break;
3768 	case WM_T_82571:
3769 	case WM_T_82572:
3770 	case WM_T_82575:	/* XXX need special handling for jumbo frames */
3771 	case WM_T_80003:
3772 		sc->sc_pba = PBA_32K;
3773 		break;
3774 	case WM_T_82573:
3775 		sc->sc_pba = PBA_12K;
3776 		break;
3777 	case WM_T_82574:
3778 	case WM_T_82583:
3779 		sc->sc_pba = PBA_20K;
3780 		break;
3781 	case WM_T_82576:
3782 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3783 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3784 		break;
3785 	case WM_T_82580:
3786 	case WM_T_I350:
3787 	case WM_T_I354:
3788 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3789 		break;
3790 	case WM_T_I210:
3791 	case WM_T_I211:
3792 		sc->sc_pba = PBA_34K;
3793 		break;
3794 	case WM_T_ICH8:
3795 		/* Workaround for a bit corruption issue in FIFO memory */
3796 		sc->sc_pba = PBA_8K;
3797 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3798 		break;
3799 	case WM_T_ICH9:
3800 	case WM_T_ICH10:
3801 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3802 		    PBA_14K : PBA_10K;
3803 		break;
3804 	case WM_T_PCH:
3805 	case WM_T_PCH2:
3806 	case WM_T_PCH_LPT:
3807 	case WM_T_PCH_SPT:
3808 		sc->sc_pba = PBA_26K;
3809 		break;
3810 	default:
3811 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3812 		    PBA_40K : PBA_48K;
3813 		break;
3814 	}
3815 	/*
3816 	 * Only old or non-multiqueue devices have the PBA register
3817 	 * XXX Need special handling for 82575.
3818 	 */
3819 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3820 	    || (sc->sc_type == WM_T_82575))
3821 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3822 
3823 	/* Prevent the PCI-E bus from sticking */
3824 	if (sc->sc_flags & WM_F_PCIE) {
3825 		int timeout = 800;
3826 
3827 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3828 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3829 
3830 		while (timeout--) {
3831 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3832 			    == 0)
3833 				break;
3834 			delay(100);
3835 		}
3836 	}
3837 
3838 	/* Set the completion timeout for interface */
3839 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3840 	    || (sc->sc_type == WM_T_82580)
3841 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3842 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3843 		wm_set_pcie_completion_timeout(sc);
3844 
3845 	/* Clear interrupt */
3846 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3847 	if (sc->sc_nintrs > 1) {
3848 		if (sc->sc_type != WM_T_82574) {
3849 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3850 			CSR_WRITE(sc, WMREG_EIAC, 0);
3851 		} else {
3852 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3853 		}
3854 	}
3855 
3856 	/* Stop the transmit and receive processes. */
3857 	CSR_WRITE(sc, WMREG_RCTL, 0);
3858 	sc->sc_rctl &= ~RCTL_EN;
3859 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
3860 	CSR_WRITE_FLUSH(sc);
3861 
3862 	/* XXX set_tbi_sbp_82543() */
3863 
3864 	delay(10*1000);
3865 
3866 	/* Must acquire the MDIO ownership before MAC reset */
3867 	switch (sc->sc_type) {
3868 	case WM_T_82573:
3869 	case WM_T_82574:
3870 	case WM_T_82583:
3871 		error = wm_get_hw_semaphore_82573(sc);
3872 		break;
3873 	default:
3874 		break;
3875 	}
3876 
3877 	/*
3878 	 * 82541 Errata 29? & 82547 Errata 28?
3879 	 * See also the description about PHY_RST bit in CTRL register
3880 	 * in 8254x_GBe_SDM.pdf.
3881 	 */
3882 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
3883 		CSR_WRITE(sc, WMREG_CTRL,
3884 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
3885 		CSR_WRITE_FLUSH(sc);
3886 		delay(5000);
3887 	}
3888 
3889 	switch (sc->sc_type) {
3890 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
3891 	case WM_T_82541:
3892 	case WM_T_82541_2:
3893 	case WM_T_82547:
3894 	case WM_T_82547_2:
3895 		/*
3896 		 * On some chipsets, a reset through a memory-mapped write
3897 		 * cycle can cause the chip to reset before completing the
3898 		 * write cycle.  This causes a major headache that can be
3899 		 * avoided by issuing the reset via indirect register writes
3900 		 * through I/O space.
3901 		 *
3902 		 * So, if we successfully mapped the I/O BAR at attach time,
3903 		 * use that.  Otherwise, try our luck with a memory-mapped
3904 		 * reset.
3905 		 */
3906 		if (sc->sc_flags & WM_F_IOH_VALID)
3907 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
3908 		else
3909 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
3910 		break;
3911 	case WM_T_82545_3:
3912 	case WM_T_82546_3:
3913 		/* Use the shadow control register on these chips. */
3914 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
3915 		break;
3916 	case WM_T_80003:
3917 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3918 		sc->phy.acquire(sc);
3919 		CSR_WRITE(sc, WMREG_CTRL, reg);
3920 		sc->phy.release(sc);
3921 		break;
3922 	case WM_T_ICH8:
3923 	case WM_T_ICH9:
3924 	case WM_T_ICH10:
3925 	case WM_T_PCH:
3926 	case WM_T_PCH2:
3927 	case WM_T_PCH_LPT:
3928 	case WM_T_PCH_SPT:
3929 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
3930 		if (wm_phy_resetisblocked(sc) == false) {
3931 			/*
3932 			 * Gate automatic PHY configuration by hardware on
3933 			 * non-managed 82579
3934 			 */
3935 			if ((sc->sc_type == WM_T_PCH2)
3936 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
3937 				== 0))
3938 				wm_gate_hw_phy_config_ich8lan(sc, true);
3939 
3940 			reg |= CTRL_PHY_RESET;
3941 			phy_reset = 1;
3942 		} else
3943 			printf("XXX reset is blocked!!!\n");
3944 		sc->phy.acquire(sc);
3945 		CSR_WRITE(sc, WMREG_CTRL, reg);
3946 		/* Don't insert a completion barrier during reset */
3947 		delay(20*1000);
3948 		mutex_exit(sc->sc_ich_phymtx);
3949 		break;
3950 	case WM_T_82580:
3951 	case WM_T_I350:
3952 	case WM_T_I354:
3953 	case WM_T_I210:
3954 	case WM_T_I211:
3955 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3956 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
3957 			CSR_WRITE_FLUSH(sc);
3958 		delay(5000);
3959 		break;
3960 	case WM_T_82542_2_0:
3961 	case WM_T_82542_2_1:
3962 	case WM_T_82543:
3963 	case WM_T_82540:
3964 	case WM_T_82545:
3965 	case WM_T_82546:
3966 	case WM_T_82571:
3967 	case WM_T_82572:
3968 	case WM_T_82573:
3969 	case WM_T_82574:
3970 	case WM_T_82575:
3971 	case WM_T_82576:
3972 	case WM_T_82583:
3973 	default:
3974 		/* Everything else can safely use the documented method. */
3975 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
3976 		break;
3977 	}
3978 
3979 	/* Must release the MDIO ownership after MAC reset */
3980 	switch (sc->sc_type) {
3981 	case WM_T_82573:
3982 	case WM_T_82574:
3983 	case WM_T_82583:
3984 		if (error == 0)
3985 			wm_put_hw_semaphore_82573(sc);
3986 		break;
3987 	default:
3988 		break;
3989 	}
3990 
3991 	if (phy_reset != 0) {
3992 		wm_get_cfg_done(sc);
3993 		delay(10 * 1000);
3994 		if (sc->sc_type >= WM_T_PCH) {
3995 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
3996 			    BM_PORT_GEN_CFG);
3997 			reg &= ~BM_WUC_HOST_WU_BIT;
3998 			wm_gmii_hv_writereg(sc->sc_dev, 2,
3999 			    BM_PORT_GEN_CFG, reg);
4000 		}
4001 	}
4002 
4003 	/* reload EEPROM */
4004 	switch (sc->sc_type) {
4005 	case WM_T_82542_2_0:
4006 	case WM_T_82542_2_1:
4007 	case WM_T_82543:
4008 	case WM_T_82544:
4009 		delay(10);
4010 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4011 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4012 		CSR_WRITE_FLUSH(sc);
4013 		delay(2000);
4014 		break;
4015 	case WM_T_82540:
4016 	case WM_T_82545:
4017 	case WM_T_82545_3:
4018 	case WM_T_82546:
4019 	case WM_T_82546_3:
4020 		delay(5*1000);
4021 		/* XXX Disable HW ARPs on ASF enabled adapters */
4022 		break;
4023 	case WM_T_82541:
4024 	case WM_T_82541_2:
4025 	case WM_T_82547:
4026 	case WM_T_82547_2:
4027 		delay(20000);
4028 		/* XXX Disable HW ARPs on ASF enabled adapters */
4029 		break;
4030 	case WM_T_82571:
4031 	case WM_T_82572:
4032 	case WM_T_82573:
4033 	case WM_T_82574:
4034 	case WM_T_82583:
4035 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4036 			delay(10);
4037 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4038 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4039 			CSR_WRITE_FLUSH(sc);
4040 		}
4041 		/* check EECD_EE_AUTORD */
4042 		wm_get_auto_rd_done(sc);
4043 		/*
4044 		 * PHY configuration from the NVM starts just after EECD_AUTO_RD
4045 		 * is set.
4046 		 */
4047 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4048 		    || (sc->sc_type == WM_T_82583))
4049 			delay(25*1000);
4050 		break;
4051 	case WM_T_82575:
4052 	case WM_T_82576:
4053 	case WM_T_82580:
4054 	case WM_T_I350:
4055 	case WM_T_I354:
4056 	case WM_T_I210:
4057 	case WM_T_I211:
4058 	case WM_T_80003:
4059 		/* check EECD_EE_AUTORD */
4060 		wm_get_auto_rd_done(sc);
4061 		break;
4062 	case WM_T_ICH8:
4063 	case WM_T_ICH9:
4064 	case WM_T_ICH10:
4065 	case WM_T_PCH:
4066 	case WM_T_PCH2:
4067 	case WM_T_PCH_LPT:
4068 	case WM_T_PCH_SPT:
4069 		break;
4070 	default:
4071 		panic("%s: unknown type\n", __func__);
4072 	}
4073 
4074 	/* Check whether EEPROM is present or not */
4075 	switch (sc->sc_type) {
4076 	case WM_T_82575:
4077 	case WM_T_82576:
4078 	case WM_T_82580:
4079 	case WM_T_I350:
4080 	case WM_T_I354:
4081 	case WM_T_ICH8:
4082 	case WM_T_ICH9:
4083 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4084 			/* Not found */
4085 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4086 			if (sc->sc_type == WM_T_82575)
4087 				wm_reset_init_script_82575(sc);
4088 		}
4089 		break;
4090 	default:
4091 		break;
4092 	}
4093 
4094 	if ((sc->sc_type == WM_T_82580)
4095 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4096 		/* clear global device reset status bit */
4097 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4098 	}
4099 
4100 	/* Clear any pending interrupt events. */
4101 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4102 	reg = CSR_READ(sc, WMREG_ICR);
4103 	if (sc->sc_nintrs > 1) {
4104 		if (sc->sc_type != WM_T_82574) {
4105 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4106 			CSR_WRITE(sc, WMREG_EIAC, 0);
4107 		} else
4108 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4109 	}
4110 
4111 	/* reload sc_ctrl */
4112 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4113 
4114 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4115 		wm_set_eee_i350(sc);
4116 
4117 	/* dummy read from WUC */
4118 	if (sc->sc_type == WM_T_PCH)
4119 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
4120 	/*
4121 	 * For PCH, this write will make sure that any noise will be detected
4122 	 * as a CRC error and be dropped rather than show up as a bad packet
4123 	 * to the DMA engine.
4124 	 */
4125 	if (sc->sc_type == WM_T_PCH)
4126 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4127 
4128 	if (sc->sc_type >= WM_T_82544)
4129 		CSR_WRITE(sc, WMREG_WUC, 0);
4130 
4131 	wm_reset_mdicnfg_82580(sc);
4132 
4133 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4134 		wm_pll_workaround_i210(sc);
4135 }
4136 
4137 /*
4138  * wm_add_rxbuf:
4139  *
4140  *	Add a receive buffer to the indicated descriptor.
4141  */
4142 static int
4143 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4144 {
4145 	struct wm_softc *sc = rxq->rxq_sc;
4146 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4147 	struct mbuf *m;
4148 	int error;
4149 
4150 	KASSERT(mutex_owned(rxq->rxq_lock));
4151 
4152 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4153 	if (m == NULL)
4154 		return ENOBUFS;
4155 
4156 	MCLGET(m, M_DONTWAIT);
4157 	if ((m->m_flags & M_EXT) == 0) {
4158 		m_freem(m);
4159 		return ENOBUFS;
4160 	}
4161 
4162 	if (rxs->rxs_mbuf != NULL)
4163 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4164 
4165 	rxs->rxs_mbuf = m;
4166 
4167 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4168 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4169 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4170 	if (error) {
4171 		/* XXX XXX XXX */
4172 		aprint_error_dev(sc->sc_dev,
4173 		    "unable to load rx DMA map %d, error = %d\n",
4174 		    idx, error);
4175 		panic("wm_add_rxbuf");
4176 	}
4177 
4178 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4179 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4180 
4181 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4182 		if ((sc->sc_rctl & RCTL_EN) != 0)
4183 			wm_init_rxdesc(rxq, idx);
4184 	} else
4185 		wm_init_rxdesc(rxq, idx);
4186 
4187 	return 0;
4188 }
4189 
4190 /*
4191  * wm_rxdrain:
4192  *
4193  *	Drain the receive queue.
4194  */
4195 static void
4196 wm_rxdrain(struct wm_rxqueue *rxq)
4197 {
4198 	struct wm_softc *sc = rxq->rxq_sc;
4199 	struct wm_rxsoft *rxs;
4200 	int i;
4201 
4202 	KASSERT(mutex_owned(rxq->rxq_lock));
4203 
4204 	for (i = 0; i < WM_NRXDESC; i++) {
4205 		rxs = &rxq->rxq_soft[i];
4206 		if (rxs->rxs_mbuf != NULL) {
4207 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4208 			m_freem(rxs->rxs_mbuf);
4209 			rxs->rxs_mbuf = NULL;
4210 		}
4211 	}
4212 }
4213 
4214 
4215 /*
4216  * XXX copied from FreeBSD's sys/net/rss_config.c
4217  */
4218 /*
4219  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4220  * effectiveness may be limited by algorithm choice and available entropy
4221  * during the boot.
4222  *
4223  * XXXRW: And that we don't randomize it yet!
4224  *
4225  * This is the default Microsoft RSS specification key which is also
4226  * the Chelsio T5 firmware default key.
4227  */
4228 #define RSS_KEYSIZE 40
4229 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4230 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4231 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4232 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4233 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4234 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4235 };
4236 
4237 /*
4238  * Caller must pass an array of size sizeof(rss_key).
4239  *
4240  * XXX
4241  * As if_ixgbe may also use this function, it should not be an
4242  * if_wm-specific function.
4243  */
4244 static void
4245 wm_rss_getkey(uint8_t *key)
4246 {
4247 
4248 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4249 }
4250 
4251 /*
4252  * Set up registers for RSS.
4253  *
4254  * XXX VMDq is not yet supported
4255  */
4256 static void
4257 wm_init_rss(struct wm_softc *sc)
4258 {
4259 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4260 	int i;
4261 
4262 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4263 
4264 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4265 		int qid, reta_ent;
4266 
4267 		qid  = i % sc->sc_nqueues;
4268 		switch (sc->sc_type) {
4269 		case WM_T_82574:
4270 			reta_ent = __SHIFTIN(qid,
4271 			    RETA_ENT_QINDEX_MASK_82574);
4272 			break;
4273 		case WM_T_82575:
4274 			reta_ent = __SHIFTIN(qid,
4275 			    RETA_ENT_QINDEX1_MASK_82575);
4276 			break;
4277 		default:
4278 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4279 			break;
4280 		}
4281 
4282 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4283 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4284 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4285 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4286 	}
4287 
4288 	wm_rss_getkey((uint8_t *)rss_key);
4289 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4290 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4291 
4292 	if (sc->sc_type == WM_T_82574)
4293 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4294 	else
4295 		mrqc = MRQC_ENABLE_RSS_MQ;
4296 
4297 	/* XXX
4298 	 * The same as FreeBSD's igb.
4299 	 * Why doesn't it use MRQC_RSS_FIELD_IPV6_EX?
4300 	 */
4301 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4302 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4303 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4304 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4305 
4306 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4307 }
4308 
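/*
 * Illustrative sketch (editorial, not part of the original source): once the
 * registers above are programmed, the hardware conceptually selects an RX
 * queue as
 *
 *	queue = RETA[toeplitz_hash(flow, wm_rss_key) % RETA_NUM_ENTRIES]
 *
 * Because the loop above fills the redirection table with i % sc_nqueues,
 * flows are spread round-robin across the enabled queues.
 */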
4309 /*
4310  * Adjust the number of TX and RX queues that the system actually uses.
4311  *
4312  * The numbers are affected by the following parameters:
4313  *     - The number of hardware queues
4314  *     - The number of MSI-X vectors (= "nvectors" argument)
4315  *     - ncpu
4316  */
4317 static void
4318 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4319 {
4320 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4321 
4322 	if (nvectors < 2) {
4323 		sc->sc_nqueues = 1;
4324 		return;
4325 	}
4326 
4327 	switch (sc->sc_type) {
4328 	case WM_T_82572:
4329 		hw_ntxqueues = 2;
4330 		hw_nrxqueues = 2;
4331 		break;
4332 	case WM_T_82574:
4333 		hw_ntxqueues = 2;
4334 		hw_nrxqueues = 2;
4335 		break;
4336 	case WM_T_82575:
4337 		hw_ntxqueues = 4;
4338 		hw_nrxqueues = 4;
4339 		break;
4340 	case WM_T_82576:
4341 		hw_ntxqueues = 16;
4342 		hw_nrxqueues = 16;
4343 		break;
4344 	case WM_T_82580:
4345 	case WM_T_I350:
4346 	case WM_T_I354:
4347 		hw_ntxqueues = 8;
4348 		hw_nrxqueues = 8;
4349 		break;
4350 	case WM_T_I210:
4351 		hw_ntxqueues = 4;
4352 		hw_nrxqueues = 4;
4353 		break;
4354 	case WM_T_I211:
4355 		hw_ntxqueues = 2;
4356 		hw_nrxqueues = 2;
4357 		break;
4358 		/*
4359 		 * As the Ethernet controllers below do not support MSI-X,
4360 		 * this driver does not use multiqueue on them.
4361 		 *     - WM_T_80003
4362 		 *     - WM_T_ICH8
4363 		 *     - WM_T_ICH9
4364 		 *     - WM_T_ICH10
4365 		 *     - WM_T_PCH
4366 		 *     - WM_T_PCH2
4367 		 *     - WM_T_PCH_LPT
4368 		 */
4369 	default:
4370 		hw_ntxqueues = 1;
4371 		hw_nrxqueues = 1;
4372 		break;
4373 	}
4374 
4375 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4376 
4377 	/*
4378 	 * Since more queues than MSI-X vectors cannot improve scaling, limit
4379 	 * the number of queues actually used.
4380 	 */
4381 	if (nvectors < hw_nqueues + 1) {
4382 		sc->sc_nqueues = nvectors - 1;
4383 	} else {
4384 		sc->sc_nqueues = hw_nqueues;
4385 	}
4386 
4387 	/*
4388 	 * Similarly, more queues than CPUs cannot improve scaling, so limit
4389 	 * the number of queues to ncpu.
4390 	 */
4391 	if (ncpu < sc->sc_nqueues)
4392 		sc->sc_nqueues = ncpu;
4393 }
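/*
 * Worked example (editorial): on an 82576 (16 hardware TX/RX queues) with
 * nvectors == 5 and ncpu == 4, hw_nqueues = min(16, 16) = 16; since
 * 5 < 16 + 1, sc_nqueues becomes nvectors - 1 = 4, and the final ncpu clamp
 * leaves it at 4: one MSI-X vector per queue plus one for the link.
 */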
4394 
4395 /*
4396  * Both single interrupt MSI and INTx can use this function.
4397  */
4398 static int
4399 wm_setup_legacy(struct wm_softc *sc)
4400 {
4401 	pci_chipset_tag_t pc = sc->sc_pc;
4402 	const char *intrstr = NULL;
4403 	char intrbuf[PCI_INTRSTR_LEN];
4404 	int error;
4405 
4406 	error = wm_alloc_txrx_queues(sc);
4407 	if (error) {
4408 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4409 		    error);
4410 		return ENOMEM;
4411 	}
4412 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4413 	    sizeof(intrbuf));
4414 #ifdef WM_MPSAFE
4415 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4416 #endif
4417 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4418 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4419 	if (sc->sc_ihs[0] == NULL) {
4420 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4421 		    (pci_intr_type(pc, sc->sc_intrs[0])
4422 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4423 		return ENOMEM;
4424 	}
4425 
4426 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4427 	sc->sc_nintrs = 1;
4428 	return 0;
4429 }
4430 
4431 static int
4432 wm_setup_msix(struct wm_softc *sc)
4433 {
4434 	void *vih;
4435 	kcpuset_t *affinity;
4436 	int qidx, error, intr_idx, txrx_established;
4437 	pci_chipset_tag_t pc = sc->sc_pc;
4438 	const char *intrstr = NULL;
4439 	char intrbuf[PCI_INTRSTR_LEN];
4440 	char intr_xname[INTRDEVNAMEBUF];
4441 
4442 	if (sc->sc_nqueues < ncpu) {
4443 		/*
4444 		 * To avoid contention with other devices' interrupts, the
4445 		 * affinity of Tx/Rx interrupts starts at CPU#1.
4446 		 */
4447 		sc->sc_affinity_offset = 1;
4448 	} else {
4449 		/*
4450 		 * In this case the device uses all CPUs, so we make the CPU
4451 		 * affinity index match the MSI-X vector number for readability.
4452 		 */
4453 		sc->sc_affinity_offset = 0;
4454 	}
4455 
4456 	error = wm_alloc_txrx_queues(sc);
4457 	if (error) {
4458 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4459 		    error);
4460 		return ENOMEM;
4461 	}
4462 
4463 	kcpuset_create(&affinity, false);
4464 	intr_idx = 0;
4465 
4466 	/*
4467 	 * TX and RX
4468 	 */
4469 	txrx_established = 0;
4470 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4471 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4472 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4473 
4474 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4475 		    sizeof(intrbuf));
4476 #ifdef WM_MPSAFE
4477 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4478 		    PCI_INTR_MPSAFE, true);
4479 #endif
4480 		memset(intr_xname, 0, sizeof(intr_xname));
4481 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4482 		    device_xname(sc->sc_dev), qidx);
4483 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4484 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4485 		if (vih == NULL) {
4486 			aprint_error_dev(sc->sc_dev,
4487 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
4488 			    intrstr ? " at " : "",
4489 			    intrstr ? intrstr : "");
4490 
4491 			goto fail;
4492 		}
4493 		kcpuset_zero(affinity);
4494 		/* Round-robin affinity */
4495 		kcpuset_set(affinity, affinity_to);
4496 		error = interrupt_distribute(vih, affinity, NULL);
4497 		if (error == 0) {
4498 			aprint_normal_dev(sc->sc_dev,
4499 			    "for TX and RX interrupting at %s affinity to %u\n",
4500 			    intrstr, affinity_to);
4501 		} else {
4502 			aprint_normal_dev(sc->sc_dev,
4503 			    "for TX and RX interrupting at %s\n", intrstr);
4504 		}
4505 		sc->sc_ihs[intr_idx] = vih;
4506 		wmq->wmq_id = qidx;
4507 		wmq->wmq_intr_idx = intr_idx;
4508 
4509 		txrx_established++;
4510 		intr_idx++;
4511 	}
4512 
4513 	/*
4514 	 * LINK
4515 	 */
4516 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4517 	    sizeof(intrbuf));
4518 #ifdef WM_MPSAFE
4519 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4520 #endif
4521 	memset(intr_xname, 0, sizeof(intr_xname));
4522 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4523 	    device_xname(sc->sc_dev));
4524 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4525 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4526 	if (vih == NULL) {
4527 		aprint_error_dev(sc->sc_dev,
4528 		    "unable to establish MSI-X(for LINK)%s%s\n",
4529 		    intrstr ? " at " : "",
4530 		    intrstr ? intrstr : "");
4531 
4532 		goto fail;
4533 	}
4534 	/* Keep the default affinity for the LINK interrupt */
4535 	aprint_normal_dev(sc->sc_dev,
4536 	    "for LINK interrupting at %s\n", intrstr);
4537 	sc->sc_ihs[intr_idx] = vih;
4538 	sc->sc_link_intr_idx = intr_idx;
4539 
4540 	sc->sc_nintrs = sc->sc_nqueues + 1;
4541 	kcpuset_destroy(affinity);
4542 	return 0;
4543 
4544  fail:
4545 	for (qidx = 0; qidx < txrx_established; qidx++) {
4546 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4547 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4548 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4549 	}
4550 
4551 	kcpuset_destroy(affinity);
4552 	return ENOMEM;
4553 }
4554 
4555 /*
4556  * wm_init:		[ifnet interface function]
4557  *
4558  *	Initialize the interface.
4559  */
4560 static int
4561 wm_init(struct ifnet *ifp)
4562 {
4563 	struct wm_softc *sc = ifp->if_softc;
4564 	int ret;
4565 
4566 	WM_CORE_LOCK(sc);
4567 	ret = wm_init_locked(ifp);
4568 	WM_CORE_UNLOCK(sc);
4569 
4570 	return ret;
4571 }
4572 
4573 static int
4574 wm_init_locked(struct ifnet *ifp)
4575 {
4576 	struct wm_softc *sc = ifp->if_softc;
4577 	int i, j, trynum, error = 0;
4578 	uint32_t reg;
4579 
4580 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4581 		device_xname(sc->sc_dev), __func__));
4582 	KASSERT(WM_CORE_LOCKED(sc));
4583 
4584 	/*
4585 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
4586 	 * There is a small but measurable benefit to avoiding the adjustment
4587 	 * of the descriptor so that the headers are aligned, for normal MTU,
4588 	 * on such platforms.  One possibility is that the DMA itself is
4589 	 * slightly more efficient if the front of the entire packet (instead
4590 	 * of the front of the headers) is aligned.
4591 	 *
4592 	 * Note we must always set align_tweak to 0 if we are using
4593 	 * jumbo frames.
4594 	 */
4595 #ifdef __NO_STRICT_ALIGNMENT
4596 	sc->sc_align_tweak = 0;
4597 #else
4598 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4599 		sc->sc_align_tweak = 0;
4600 	else
4601 		sc->sc_align_tweak = 2;
4602 #endif /* __NO_STRICT_ALIGNMENT */
4603 
4604 	/* Cancel any pending I/O. */
4605 	wm_stop_locked(ifp, 0);
4606 
4607 	/* update statistics before reset */
4608 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4609 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4610 
4611 	/* Reset the chip to a known state. */
4612 	wm_reset(sc);
4613 
4614 	switch (sc->sc_type) {
4615 	case WM_T_82571:
4616 	case WM_T_82572:
4617 	case WM_T_82573:
4618 	case WM_T_82574:
4619 	case WM_T_82583:
4620 	case WM_T_80003:
4621 	case WM_T_ICH8:
4622 	case WM_T_ICH9:
4623 	case WM_T_ICH10:
4624 	case WM_T_PCH:
4625 	case WM_T_PCH2:
4626 	case WM_T_PCH_LPT:
4627 	case WM_T_PCH_SPT:
4628 		/* AMT based hardware can now take control from firmware */
4629 		if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4630 			wm_get_hw_control(sc);
4631 		break;
4632 	default:
4633 		break;
4634 	}
4635 
4636 	/* Init hardware bits */
4637 	wm_initialize_hardware_bits(sc);
4638 
4639 	/* Reset the PHY. */
4640 	if (sc->sc_flags & WM_F_HAS_MII)
4641 		wm_gmii_reset(sc);
4642 
4643 	/* Calculate (E)ITR value */
4644 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4645 		sc->sc_itr = 450;	/* For EITR */
4646 	} else if (sc->sc_type >= WM_T_82543) {
4647 		/*
4648 		 * Set up the interrupt throttling register (units of 256ns)
4649 		 * Note that a footnote in Intel's documentation says this
4650 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4651 		 * or 10Mbit mode.  Empirically, it appears to be the case
4652 		 * that that is also true for the 1024ns units of the other
4653 		 * interrupt-related timer registers -- so, really, we ought
4654 		 * to divide this value by 4 when the link speed is low.
4655 		 *
4656 		 * XXX implement this division at link speed change!
4657 		 */
4658 
4659 		/*
4660 		 * For N interrupts/sec, set this value to:
4661 		 * 1000000000 / (N * 256).  Note that we set the
4662 		 * absolute and packet timer values to this value
4663 		 * divided by 4 to get "simple timer" behavior.
4664 		 */
4665 
4666 		sc->sc_itr = 1500;		/* 2604 ints/sec */
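		/*
		 * Editorial arithmetic check: 1000000000 / (1500 * 256)
		 * = 1000000000 / 384000 ~= 2604 interrupts/sec, matching
		 * the figure in the comment above.
		 */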
4667 	}
4668 
4669 	error = wm_init_txrx_queues(sc);
4670 	if (error)
4671 		goto out;
4672 
4673 	/*
4674 	 * Clear out the VLAN table -- we don't use it (yet).
4675 	 */
4676 	CSR_WRITE(sc, WMREG_VET, 0);
4677 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4678 		trynum = 10; /* Due to hw errata */
4679 	else
4680 		trynum = 1;
4681 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4682 		for (j = 0; j < trynum; j++)
4683 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4684 
4685 	/*
4686 	 * Set up flow-control parameters.
4687 	 *
4688 	 * XXX Values could probably stand some tuning.
4689 	 */
4690 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4691 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4692 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4693 	    && (sc->sc_type != WM_T_PCH_SPT)) {
4694 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4695 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4696 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4697 	}
4698 
4699 	sc->sc_fcrtl = FCRTL_DFLT;
4700 	if (sc->sc_type < WM_T_82543) {
4701 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4702 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4703 	} else {
4704 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4705 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4706 	}
4707 
4708 	if (sc->sc_type == WM_T_80003)
4709 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4710 	else
4711 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4712 
4713 	/* Writes the control register. */
4714 	wm_set_vlan(sc);
4715 
4716 	if (sc->sc_flags & WM_F_HAS_MII) {
4717 		int val;
4718 
4719 		switch (sc->sc_type) {
4720 		case WM_T_80003:
4721 		case WM_T_ICH8:
4722 		case WM_T_ICH9:
4723 		case WM_T_ICH10:
4724 		case WM_T_PCH:
4725 		case WM_T_PCH2:
4726 		case WM_T_PCH_LPT:
4727 		case WM_T_PCH_SPT:
4728 			/*
4729 			 * Set the mac to wait the maximum time between each
4730 			 * iteration and increase the max iterations when
4731 			 * polling the phy; this fixes erroneous timeouts at
4732 			 * 10Mbps.
4733 			 */
4734 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4735 			    0xFFFF);
4736 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4737 			val |= 0x3F;
4738 			wm_kmrn_writereg(sc,
4739 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4740 			break;
4741 		default:
4742 			break;
4743 		}
4744 
4745 		if (sc->sc_type == WM_T_80003) {
4746 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4747 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4748 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4749 
4750 			/* Bypass the RX and TX FIFOs */
4751 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4752 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4753 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4754 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4755 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4756 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4757 		}
4758 	}
4759 #if 0
4760 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4761 #endif
4762 
4763 	/* Set up checksum offload parameters. */
4764 	reg = CSR_READ(sc, WMREG_RXCSUM);
4765 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4766 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4767 		reg |= RXCSUM_IPOFL;
4768 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4769 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4770 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4771 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4772 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4773 
4774 	/* Set up MSI-X */
4775 	if (sc->sc_nintrs > 1) {
4776 		uint32_t ivar;
4777 		struct wm_queue *wmq;
4778 		int qid, qintr_idx;
4779 
4780 		if (sc->sc_type == WM_T_82575) {
4781 			/* Interrupt control */
4782 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4783 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4784 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4785 
4786 			/* TX and RX */
4787 			for (i = 0; i < sc->sc_nqueues; i++) {
4788 				wmq = &sc->sc_queue[i];
4789 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4790 				    EITR_TX_QUEUE(wmq->wmq_id)
4791 				    | EITR_RX_QUEUE(wmq->wmq_id));
4792 			}
4793 			/* Link status */
4794 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4795 			    EITR_OTHER);
4796 		} else if (sc->sc_type == WM_T_82574) {
4797 			/* Interrupt control */
4798 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4799 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4800 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4801 
4802 			ivar = 0;
4803 			/* TX and RX */
4804 			for (i = 0; i < sc->sc_nqueues; i++) {
4805 				wmq = &sc->sc_queue[i];
4806 				qid = wmq->wmq_id;
4807 				qintr_idx = wmq->wmq_intr_idx;
4808 
4809 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4810 				    IVAR_TX_MASK_Q_82574(qid));
4811 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4812 				    IVAR_RX_MASK_Q_82574(qid));
4813 			}
4814 			/* Link status */
4815 			ivar |= __SHIFTIN((IVAR_VALID_82574
4816 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4817 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4818 		} else {
4819 			/* Interrupt control */
4820 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4821 			    | GPIE_EIAME | GPIE_PBA);
4822 
4823 			switch (sc->sc_type) {
4824 			case WM_T_82580:
4825 			case WM_T_I350:
4826 			case WM_T_I354:
4827 			case WM_T_I210:
4828 			case WM_T_I211:
4829 				/* TX and RX */
4830 				for (i = 0; i < sc->sc_nqueues; i++) {
4831 					wmq = &sc->sc_queue[i];
4832 					qid = wmq->wmq_id;
4833 					qintr_idx = wmq->wmq_intr_idx;
4834 
4835 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
4836 					ivar &= ~IVAR_TX_MASK_Q(qid);
4837 					ivar |= __SHIFTIN((qintr_idx
4838 						| IVAR_VALID),
4839 					    IVAR_TX_MASK_Q(qid));
4840 					ivar &= ~IVAR_RX_MASK_Q(qid);
4841 					ivar |= __SHIFTIN((qintr_idx
4842 						| IVAR_VALID),
4843 					    IVAR_RX_MASK_Q(qid));
4844 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
4845 				}
4846 				break;
4847 			case WM_T_82576:
4848 				/* TX and RX */
4849 				for (i = 0; i < sc->sc_nqueues; i++) {
4850 					wmq = &sc->sc_queue[i];
4851 					qid = wmq->wmq_id;
4852 					qintr_idx = wmq->wmq_intr_idx;
4853 
4854 					ivar = CSR_READ(sc,
4855 					    WMREG_IVAR_Q_82576(qid));
4856 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
4857 					ivar |= __SHIFTIN((qintr_idx
4858 						| IVAR_VALID),
4859 					    IVAR_TX_MASK_Q_82576(qid));
4860 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
4861 					ivar |= __SHIFTIN((qintr_idx
4862 						| IVAR_VALID),
4863 					    IVAR_RX_MASK_Q_82576(qid));
4864 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
4865 					    ivar);
4866 				}
4867 				break;
4868 			default:
4869 				break;
4870 			}
4871 
4872 			/* Link status */
4873 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
4874 			    IVAR_MISC_OTHER);
4875 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
4876 		}
4877 
4878 		if (sc->sc_nqueues > 1) {
4879 			wm_init_rss(sc);
4880 
4881 			/*
4882 			 * NOTE: Receive Full-Packet Checksum Offload is
4883 			 * mutually exclusive with Multiqueue.  However,
4884 			 * this is not the same as the TCP/IP checksums,
4885 			 * which still work.
4886 			 */
4887 			reg = CSR_READ(sc, WMREG_RXCSUM);
4888 			reg |= RXCSUM_PCSD;
4889 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
4890 		}
4891 	}
4892 
4893 	/* Set up the interrupt registers. */
4894 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4895 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
4896 	    ICR_RXO | ICR_RXT0;
4897 	if (sc->sc_nintrs > 1) {
4898 		uint32_t mask;
4899 		struct wm_queue *wmq;
4900 
4901 		switch (sc->sc_type) {
4902 		case WM_T_82574:
4903 			CSR_WRITE(sc, WMREG_EIAC_82574,
4904 			    WMREG_EIAC_82574_MSIX_MASK);
4905 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
4906 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4907 			break;
4908 		default:
4909 			if (sc->sc_type == WM_T_82575) {
4910 				mask = 0;
4911 				for (i = 0; i < sc->sc_nqueues; i++) {
4912 					wmq = &sc->sc_queue[i];
4913 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
4914 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
4915 				}
4916 				mask |= EITR_OTHER;
4917 			} else {
4918 				mask = 0;
4919 				for (i = 0; i < sc->sc_nqueues; i++) {
4920 					wmq = &sc->sc_queue[i];
4921 					mask |= 1 << wmq->wmq_intr_idx;
4922 				}
4923 				mask |= 1 << sc->sc_link_intr_idx;
4924 			}
4925 			CSR_WRITE(sc, WMREG_EIAC, mask);
4926 			CSR_WRITE(sc, WMREG_EIAM, mask);
4927 			CSR_WRITE(sc, WMREG_EIMS, mask);
4928 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
4929 			break;
4930 		}
4931 	} else
4932 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
4933 
4934 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4935 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4936 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4937 	    || (sc->sc_type == WM_T_PCH_SPT)) {
4938 		reg = CSR_READ(sc, WMREG_KABGTXD);
4939 		reg |= KABGTXD_BGSQLBIAS;
4940 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4941 	}
4942 
4943 	/* Set up the inter-packet gap. */
4944 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
4945 
4946 	if (sc->sc_type >= WM_T_82543) {
4947 		/*
4948 		 * XXX 82574 has both ITR and EITR. Set EITR when we use
4949 		 * the multiqueue function with MSI-X.
4950 		 */
4951 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4952 			int qidx;
4953 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4954 				struct wm_queue *wmq = &sc->sc_queue[qidx];
4955 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
4956 				    sc->sc_itr);
4957 			}
4958 			/*
4959 			 * Link interrupts occur much less frequently than
4960 			 * TX and RX interrupts, so we don't tune the
4961 			 * EITR(WM_MSIX_LINKINTR_IDX) value the way
4962 			 * FreeBSD's if_igb does.
4963 			 */
4964 		} else
4965 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
4966 	}
4967 
4968 	/* Set the VLAN ethernetype. */
4969 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
4970 
4971 	/*
4972 	 * Set up the transmit control register; we start out with
4973 	 * a collision distance suitable for FDX, but update it when
4974 	 * we resolve the media type.
4975 	 */
4976 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
4977 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
4978 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
4979 	if (sc->sc_type >= WM_T_82571)
4980 		sc->sc_tctl |= TCTL_MULR;
4981 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
4982 
4983 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4984 		/* Write TDT after TCTL.EN is set. See the document. */
4985 		CSR_WRITE(sc, WMREG_TDT(0), 0);
4986 	}
4987 
4988 	if (sc->sc_type == WM_T_80003) {
4989 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
4990 		reg &= ~TCTL_EXT_GCEX_MASK;
4991 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
4992 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
4993 	}
4994 
4995 	/* Set the media. */
4996 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
4997 		goto out;
4998 
4999 	/* Configure for OS presence */
5000 	wm_init_manageability(sc);
5001 
5002 	/*
5003 	 * Set up the receive control register; we actually program
5004 	 * the register when we set the receive filter.  Use multicast
5005 	 * address offset type 0.
5006 	 *
5007 	 * Only the i82544 has the ability to strip the incoming
5008 	 * CRC, so we don't enable that feature.
5009 	 */
5010 	sc->sc_mchash_type = 0;
5011 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5012 	    | RCTL_MO(sc->sc_mchash_type);
5013 
5014 	/*
5015 	 * The I350 has a bug where it always strips the CRC whether
5016 	 * asked to or not, so ask for a stripped CRC here and cope in rxeof.
5017 	 */
5018 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5019 	    || (sc->sc_type == WM_T_I210))
5020 		sc->sc_rctl |= RCTL_SECRC;
5021 
5022 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5023 	    && (ifp->if_mtu > ETHERMTU)) {
5024 		sc->sc_rctl |= RCTL_LPE;
5025 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5026 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5027 	}
5028 
5029 	if (MCLBYTES == 2048) {
5030 		sc->sc_rctl |= RCTL_2k;
5031 	} else {
5032 		if (sc->sc_type >= WM_T_82543) {
5033 			switch (MCLBYTES) {
5034 			case 4096:
5035 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5036 				break;
5037 			case 8192:
5038 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5039 				break;
5040 			case 16384:
5041 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5042 				break;
5043 			default:
5044 				panic("wm_init: MCLBYTES %d unsupported",
5045 				    MCLBYTES);
5046 				break;
5047 			}
5048 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
5049 	}
5050 
5051 	/* Set the receive filter. */
5052 	wm_set_filter(sc);
5053 
5054 	/* Enable ECC */
5055 	switch (sc->sc_type) {
5056 	case WM_T_82571:
5057 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5058 		reg |= PBA_ECC_CORR_EN;
5059 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5060 		break;
5061 	case WM_T_PCH_LPT:
5062 	case WM_T_PCH_SPT:
5063 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5064 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5065 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5066 
5067 		reg = CSR_READ(sc, WMREG_CTRL);
5068 		reg |= CTRL_MEHE;
5069 		CSR_WRITE(sc, WMREG_CTRL, reg);
5070 		break;
5071 	default:
5072 		break;
5073 	}
5074 
5075 	/* On 575 and later set RDT only if RX enabled */
5076 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5077 		int qidx;
5078 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5079 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5080 			for (i = 0; i < WM_NRXDESC; i++) {
5081 				mutex_enter(rxq->rxq_lock);
5082 				wm_init_rxdesc(rxq, i);
5083 				mutex_exit(rxq->rxq_lock);
5085 			}
5086 		}
5087 	}
5088 
5089 	sc->sc_stopping = false;
5090 
5091 	/* Start the one second link check clock. */
5092 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5093 
5094 	/* ...all done! */
5095 	ifp->if_flags |= IFF_RUNNING;
5096 	ifp->if_flags &= ~IFF_OACTIVE;
5097 
5098  out:
5099 	sc->sc_if_flags = ifp->if_flags;
5100 	if (error)
5101 		log(LOG_ERR, "%s: interface not running\n",
5102 		    device_xname(sc->sc_dev));
5103 	return error;
5104 }
5105 
5106 /*
5107  * wm_stop:		[ifnet interface function]
5108  *
5109  *	Stop transmission on the interface.
5110  */
5111 static void
5112 wm_stop(struct ifnet *ifp, int disable)
5113 {
5114 	struct wm_softc *sc = ifp->if_softc;
5115 
5116 	WM_CORE_LOCK(sc);
5117 	wm_stop_locked(ifp, disable);
5118 	WM_CORE_UNLOCK(sc);
5119 }
5120 
5121 static void
5122 wm_stop_locked(struct ifnet *ifp, int disable)
5123 {
5124 	struct wm_softc *sc = ifp->if_softc;
5125 	struct wm_txsoft *txs;
5126 	int i, qidx;
5127 
5128 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5129 		device_xname(sc->sc_dev), __func__));
5130 	KASSERT(WM_CORE_LOCKED(sc));
5131 
5132 	sc->sc_stopping = true;
5133 
5134 	/* Stop the one second clock. */
5135 	callout_stop(&sc->sc_tick_ch);
5136 
5137 	/* Stop the 82547 Tx FIFO stall check timer. */
5138 	if (sc->sc_type == WM_T_82547)
5139 		callout_stop(&sc->sc_txfifo_ch);
5140 
5141 	if (sc->sc_flags & WM_F_HAS_MII) {
5142 		/* Down the MII. */
5143 		mii_down(&sc->sc_mii);
5144 	} else {
5145 #if 0
5146 		/* Should we clear PHY's status properly? */
5147 		wm_reset(sc);
5148 #endif
5149 	}
5150 
5151 	/* Stop the transmit and receive processes. */
5152 	CSR_WRITE(sc, WMREG_TCTL, 0);
5153 	CSR_WRITE(sc, WMREG_RCTL, 0);
5154 	sc->sc_rctl &= ~RCTL_EN;
5155 
5156 	/*
5157 	 * Clear the interrupt mask to ensure the device cannot assert its
5158 	 * interrupt line.
5159 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5160 	 * service any currently pending or shared interrupt.
5161 	 */
5162 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5163 	sc->sc_icr = 0;
5164 	if (sc->sc_nintrs > 1) {
5165 		if (sc->sc_type != WM_T_82574) {
5166 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5167 			CSR_WRITE(sc, WMREG_EIAC, 0);
5168 		} else
5169 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5170 	}
5171 
5172 	/* Release any queued transmit buffers. */
5173 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5174 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5175 		struct wm_txqueue *txq = &wmq->wmq_txq;
5176 		mutex_enter(txq->txq_lock);
5177 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5178 			txs = &txq->txq_soft[i];
5179 			if (txs->txs_mbuf != NULL) {
5180 				bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
5181 				m_freem(txs->txs_mbuf);
5182 				txs->txs_mbuf = NULL;
5183 			}
5184 		}
5185 		if (sc->sc_type == WM_T_PCH_SPT) {
5186 			pcireg_t preg;
5187 			uint32_t reg;
5188 			int nexttx;
5189 
5190 			/* First, disable MULR fix in FEXTNVM11 */
5191 			reg = CSR_READ(sc, WMREG_FEXTNVM11);
5192 			reg |= FEXTNVM11_DIS_MULRFIX;
5193 			CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
5194 
5195 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5196 			    WM_PCI_DESCRING_STATUS);
5197 			reg = CSR_READ(sc, WMREG_TDLEN(0));
5198 			printf("XXX RST: FLUSH = %08x, len = %u\n",
5199 			    (uint32_t)(preg & DESCRING_STATUS_FLUSH_REQ), reg);
5200 			if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0)
5201 			    && (reg != 0)) {
5202 				/* TX */
5203 				printf("XXX need TX flush (reg = %08x)\n",
5204 				    preg);
5205 				wm_init_tx_descs(sc, txq);
5206 				wm_init_tx_regs(sc, wmq, txq);
5207 				nexttx = txq->txq_next;
5208 				wm_set_dma_addr(
5209 					&txq->txq_descs[nexttx].wtx_addr,
5210 					WM_CDTXADDR(txq, nexttx));
5211 				txq->txq_descs[nexttx].wtx_cmdlen
5212 				    = htole32(WTX_CMD_IFCS | 512);
5213 				wm_cdtxsync(txq, nexttx, 1,
5214 				    BUS_DMASYNC_PREREAD |BUS_DMASYNC_PREWRITE);
5215 				CSR_WRITE(sc, WMREG_TCTL, TCTL_EN);
5216 				CSR_WRITE(sc, WMREG_TDT(0), nexttx);
5217 				CSR_WRITE_FLUSH(sc);
5218 				delay(250);
5219 				CSR_WRITE(sc, WMREG_TCTL, 0);
5220 			}
5221 			preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
5222 			    WM_PCI_DESCRING_STATUS);
5223 			if (preg & DESCRING_STATUS_FLUSH_REQ) {
5224 				/* RX */
5225 				printf("XXX need RX flush\n");
5226 			}
5227 		}
5228 		mutex_exit(txq->txq_lock);
5229 	}
5230 
5231 	/* Mark the interface as down and cancel the watchdog timer. */
5232 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5233 	ifp->if_timer = 0;
5234 
5235 	if (disable) {
5236 		for (i = 0; i < sc->sc_nqueues; i++) {
5237 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5238 			mutex_enter(rxq->rxq_lock);
5239 			wm_rxdrain(rxq);
5240 			mutex_exit(rxq->rxq_lock);
5241 		}
5242 	}
5243 
5244 #if 0 /* notyet */
5245 	if (sc->sc_type >= WM_T_82544)
5246 		CSR_WRITE(sc, WMREG_WUC, 0);
5247 #endif
5248 }
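
/*
 * Note on the PCH_SPT block in wm_stop_locked() above: when the
 * DESCRING_STATUS_FLUSH_REQ bit is set in PCI config space and the
 * Tx ring length is non-zero, the code services the pending flush by
 * queueing a single dummy descriptor (WTX_CMD_IFCS | 512), briefly
 * enabling the transmitter, and waiting 250us before disabling it
 * again.  This reads as a hardware workaround to drain the descriptor
 * ring before reset; the corresponding Rx flush is detected but not
 * yet implemented (only the "XXX need RX flush" diagnostic fires).
 */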
5249 
5250 static void
5251 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5252 {
5253 	struct mbuf *m;
5254 	int i;
5255 
5256 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5257 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5258 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5259 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5260 		    m->m_data, m->m_len, m->m_flags);
5261 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5262 	    i, i == 1 ? "" : "s");
5263 }
5264 
5265 /*
5266  * wm_82547_txfifo_stall:
5267  *
5268  *	Callout used to wait for the 82547 Tx FIFO to drain,
5269  *	reset the FIFO pointers, and restart packet transmission.
5270  */
5271 static void
5272 wm_82547_txfifo_stall(void *arg)
5273 {
5274 	struct wm_softc *sc = arg;
5275 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5276 
5277 	mutex_enter(txq->txq_lock);
5278 
5279 	if (sc->sc_stopping)
5280 		goto out;
5281 
5282 	if (txq->txq_fifo_stall) {
5283 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5284 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5285 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5286 			/*
5287 			 * Packets have drained.  Stop transmitter, reset
5288 			 * FIFO pointers, restart transmitter, and kick
5289 			 * the packet queue.
5290 			 */
5291 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5292 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5293 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5294 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5295 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5296 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5297 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5298 			CSR_WRITE_FLUSH(sc);
5299 
5300 			txq->txq_fifo_head = 0;
5301 			txq->txq_fifo_stall = 0;
5302 			wm_start_locked(&sc->sc_ethercom.ec_if);
5303 		} else {
5304 			/*
5305 			 * Still waiting for packets to drain; try again in
5306 			 * another tick.
5307 			 */
5308 			callout_schedule(&sc->sc_txfifo_ch, 1);
5309 		}
5310 	}
5311 
5312 out:
5313 	mutex_exit(txq->txq_lock);
5314 }
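
/*
 * Drain detection in wm_82547_txfifo_stall() above: the hardware is
 * considered idle only when the descriptor ring is empty (TDT == TDH)
 * and both internal FIFO pointer pairs match (TDFT == TDFH and
 * TDFTS == TDFHS).  Only then is it safe to rewind all four FIFO
 * pointers to txq_fifo_addr and restart the transmitter.
 */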
5315 
5316 /*
5317  * wm_82547_txfifo_bugchk:
5318  *
5319  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5320  *	prevent enqueueing a packet that would wrap around the end
5321  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
5322  *
5323  *	We do this by checking the amount of space before the end
5324  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5325  *	the Tx FIFO, wait for all remaining packets to drain, reset
5326  *	the internal FIFO pointers to the beginning, and restart
5327  *	transmission on the interface.
5328  */
5329 #define	WM_FIFO_HDR		0x10
5330 #define	WM_82547_PAD_LEN	0x3e0
5331 static int
5332 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5333 {
5334 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5335 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5336 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5337 
5338 	/* Just return if already stalled. */
5339 	if (txq->txq_fifo_stall)
5340 		return 1;
5341 
5342 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5343 		/* Stall only occurs in half-duplex mode. */
5344 		goto send_packet;
5345 	}
5346 
5347 	if (len >= WM_82547_PAD_LEN + space) {
5348 		txq->txq_fifo_stall = 1;
5349 		callout_schedule(&sc->sc_txfifo_ch, 1);
5350 		return 1;
5351 	}
5352 
5353  send_packet:
5354 	txq->txq_fifo_head += len;
5355 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5356 		txq->txq_fifo_head -= txq->txq_fifo_size;
5357 
5358 	return 0;
5359 }
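
/*
 * Worked example of the accounting above (illustrative numbers): a
 * 1514-byte frame gives len = roundup(1514 + 0x10, 0x10) = 1536.
 * With WM_82547_PAD_LEN = 0x3e0 (992), the stall path is taken when
 * 1536 >= 992 + space, i.e. when 544 bytes or fewer remain before
 * the end of the FIFO.  Otherwise txq_fifo_head simply advances by
 * len, wrapping modulo txq_fifo_size.
 */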
5360 
5361 static int
5362 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5363 {
5364 	int error;
5365 
5366 	/*
5367 	 * Allocate the control data structures, and create and load the
5368 	 * DMA map for it.
5369 	 *
5370 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5371 	 * memory.  So must Rx descriptors.  We simplify by allocating
5372 	 * both sets within the same 4G segment.
5373 	 */
5374 	if (sc->sc_type < WM_T_82544)
5375 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5376 	else
5377 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5378 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5379 		txq->txq_descsize = sizeof(nq_txdesc_t);
5380 	else
5381 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
5382 
5383 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5384 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5385 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5386 		aprint_error_dev(sc->sc_dev,
5387 		    "unable to allocate TX control data, error = %d\n",
5388 		    error);
5389 		goto fail_0;
5390 	}
5391 
5392 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5393 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5394 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5395 		aprint_error_dev(sc->sc_dev,
5396 		    "unable to map TX control data, error = %d\n", error);
5397 		goto fail_1;
5398 	}
5399 
5400 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5401 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5402 		aprint_error_dev(sc->sc_dev,
5403 		    "unable to create TX control data DMA map, error = %d\n",
5404 		    error);
5405 		goto fail_2;
5406 	}
5407 
5408 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5409 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5410 		aprint_error_dev(sc->sc_dev,
5411 		    "unable to load TX control data DMA map, error = %d\n",
5412 		    error);
5413 		goto fail_3;
5414 	}
5415 
5416 	return 0;
5417 
5418  fail_3:
5419 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5420  fail_2:
5421 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5422 	    WM_TXDESCS_SIZE(txq));
5423  fail_1:
5424 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5425  fail_0:
5426 	return error;
5427 }
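
/*
 * The allocation above follows the usual four-step bus_dma(9)
 * sequence -- bus_dmamem_alloc(), bus_dmamem_map(),
 * bus_dmamap_create(), bus_dmamap_load() -- with the fail_* labels
 * unwinding those steps in reverse.  The (bus_size_t)0x100000000ULL
 * boundary argument to bus_dmamem_alloc() is what enforces the
 * "same 4G segment" requirement from the NOTE above: the allocated
 * region may not cross a 4GB boundary.
 */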
5428 
5429 static void
5430 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5431 {
5432 
5433 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5434 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5435 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5436 	    WM_TXDESCS_SIZE(txq));
5437 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5438 }
5439 
5440 static int
5441 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5442 {
5443 	int error;
5444 
5445 	/*
5446 	 * Allocate the control data structures, and create and load the
5447 	 * DMA map for it.
5448 	 *
5449 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5450 	 * memory.  So must Rx descriptors.  We simplify by allocating
5451 	 * both sets within the same 4G segment.
5452 	 */
5453 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5454 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5455 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5456 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5457 		aprint_error_dev(sc->sc_dev,
5458 		    "unable to allocate RX control data, error = %d\n",
5459 		    error);
5460 		goto fail_0;
5461 	}
5462 
5463 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5464 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5465 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5466 		aprint_error_dev(sc->sc_dev,
5467 		    "unable to map RX control data, error = %d\n", error);
5468 		goto fail_1;
5469 	}
5470 
5471 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5472 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5473 		aprint_error_dev(sc->sc_dev,
5474 		    "unable to create RX control data DMA map, error = %d\n",
5475 		    error);
5476 		goto fail_2;
5477 	}
5478 
5479 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5480 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5481 		aprint_error_dev(sc->sc_dev,
5482 		    "unable to load RX control data DMA map, error = %d\n",
5483 		    error);
5484 		goto fail_3;
5485 	}
5486 
5487 	return 0;
5488 
5489  fail_3:
5490 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5491  fail_2:
5492 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5493 	    rxq->rxq_desc_size);
5494  fail_1:
5495 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5496  fail_0:
5497 	return error;
5498 }
5499 
5500 static void
5501 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5502 {
5503 
5504 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5505 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5506 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5507 	    rxq->rxq_desc_size);
5508 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5509 }
5510 
5511 
5512 static int
5513 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5514 {
5515 	int i, error;
5516 
5517 	/* Create the transmit buffer DMA maps. */
5518 	WM_TXQUEUELEN(txq) =
5519 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5520 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5521 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5522 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5523 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5524 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5525 			aprint_error_dev(sc->sc_dev,
5526 			    "unable to create Tx DMA map %d, error = %d\n",
5527 			    i, error);
5528 			goto fail;
5529 		}
5530 	}
5531 
5532 	return 0;
5533 
5534  fail:
5535 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5536 		if (txq->txq_soft[i].txs_dmamap != NULL)
5537 			bus_dmamap_destroy(sc->sc_dmat,
5538 			    txq->txq_soft[i].txs_dmamap);
5539 	}
5540 	return error;
5541 }
5542 
5543 static void
5544 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5545 {
5546 	int i;
5547 
5548 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5549 		if (txq->txq_soft[i].txs_dmamap != NULL)
5550 			bus_dmamap_destroy(sc->sc_dmat,
5551 			    txq->txq_soft[i].txs_dmamap);
5552 	}
5553 }
5554 
5555 static int
5556 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5557 {
5558 	int i, error;
5559 
5560 	/* Create the receive buffer DMA maps. */
5561 	for (i = 0; i < WM_NRXDESC; i++) {
5562 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5563 			    MCLBYTES, 0, 0,
5564 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5565 			aprint_error_dev(sc->sc_dev,
5566 			    "unable to create Rx DMA map %d error = %d\n",
5567 			    i, error);
5568 			goto fail;
5569 		}
5570 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5571 	}
5572 
5573 	return 0;
5574 
5575  fail:
5576 	for (i = 0; i < WM_NRXDESC; i++) {
5577 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5578 			bus_dmamap_destroy(sc->sc_dmat,
5579 			    rxq->rxq_soft[i].rxs_dmamap);
5580 	}
5581 	return error;
5582 }
5583 
5584 static void
5585 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5586 {
5587 	int i;
5588 
5589 	for (i = 0; i < WM_NRXDESC; i++) {
5590 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5591 			bus_dmamap_destroy(sc->sc_dmat,
5592 			    rxq->rxq_soft[i].rxs_dmamap);
5593 	}
5594 }
5595 
5596 /*
5597  * wm_alloc_txrx_queues:
5598  *	Allocate {tx,rx} descriptors and {tx,rx} buffers
5599  */
5600 static int
5601 wm_alloc_txrx_queues(struct wm_softc *sc)
5602 {
5603 	int i, error, tx_done, rx_done;
5604 
5605 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5606 	    KM_SLEEP);
5607 	if (sc->sc_queue == NULL) {
5608 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
5609 		error = ENOMEM;
5610 		goto fail_0;
5611 	}
5612 
5613 	/*
5614 	 * For transmission
5615 	 */
5616 	error = 0;
5617 	tx_done = 0;
5618 	for (i = 0; i < sc->sc_nqueues; i++) {
5619 #ifdef WM_EVENT_COUNTERS
5620 		int j;
5621 		const char *xname;
5622 #endif
5623 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5624 		txq->txq_sc = sc;
5625 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5626 
5627 		error = wm_alloc_tx_descs(sc, txq);
5628 		if (error)
5629 			break;
5630 		error = wm_alloc_tx_buffer(sc, txq);
5631 		if (error) {
5632 			wm_free_tx_descs(sc, txq);
5633 			break;
5634 		}
5635 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5636 		if (txq->txq_interq == NULL) {
5637 			wm_free_tx_descs(sc, txq);
5638 			wm_free_tx_buffer(sc, txq);
5639 			error = ENOMEM;
5640 			break;
5641 		}
5642 
5643 #ifdef WM_EVENT_COUNTERS
5644 		xname = device_xname(sc->sc_dev);
5645 
5646 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5647 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5648 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5649 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5650 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5651 
5652 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5653 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5654 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5655 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5656 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5657 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5658 
5659 		for (j = 0; j < WM_NTXSEGS; j++) {
5660 			snprintf(txq->txq_txseg_evcnt_names[j],
5661 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5662 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5663 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
5664 		}
5665 
5666 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5667 
5668 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5669 #endif /* WM_EVENT_COUNTERS */
5670 
5671 		tx_done++;
5672 	}
5673 	if (error)
5674 		goto fail_1;
5675 
5676 	/*
5677 	 * For receive
5678 	 */
5679 	error = 0;
5680 	rx_done = 0;
5681 	for (i = 0; i < sc->sc_nqueues; i++) {
5682 #ifdef WM_EVENT_COUNTERS
5683 		const char *xname;
5684 #endif
5685 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5686 		rxq->rxq_sc = sc;
5687 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5688 
5689 		error = wm_alloc_rx_descs(sc, rxq);
5690 		if (error)
5691 			break;
5692 
5693 		error = wm_alloc_rx_buffer(sc, rxq);
5694 		if (error) {
5695 			wm_free_rx_descs(sc, rxq);
5696 			break;
5697 		}
5698 
5699 #ifdef WM_EVENT_COUNTERS
5700 		xname = device_xname(sc->sc_dev);
5701 
5702 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5703 
5704 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5705 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5706 #endif /* WM_EVENT_COUNTERS */
5707 
5708 		rx_done++;
5709 	}
5710 	if (error)
5711 		goto fail_2;
5712 
5713 	return 0;
5714 
5715  fail_2:
5716 	for (i = 0; i < rx_done; i++) {
5717 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5718 		wm_free_rx_buffer(sc, rxq);
5719 		wm_free_rx_descs(sc, rxq);
5720 		if (rxq->rxq_lock)
5721 			mutex_obj_free(rxq->rxq_lock);
5722 	}
5723  fail_1:
5724 	for (i = 0; i < tx_done; i++) {
5725 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5726 		pcq_destroy(txq->txq_interq);
5727 		wm_free_tx_buffer(sc, txq);
5728 		wm_free_tx_descs(sc, txq);
5729 		if (txq->txq_lock)
5730 			mutex_obj_free(txq->txq_lock);
5731 	}
5732 
5733 	kmem_free(sc->sc_queue,
5734 	    sizeof(struct wm_queue) * sc->sc_nqueues);
5735  fail_0:
5736 	return error;
5737 }
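
/*
 * Failure-handling sketch for wm_alloc_txrx_queues() above: tx_done
 * and rx_done count the queues that were fully constructed, so the
 * fail_2 and fail_1 paths tear down only those, releasing each
 * queue's resources in the reverse order of construction, before
 * freeing the sc_queue array itself.
 */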
5738 
5739 /*
5740  * wm_free_txrx_queues:
5741  *	Free {tx,rx} descriptors and {tx,rx} buffers
5742  */
5743 static void
5744 wm_free_txrx_queues(struct wm_softc *sc)
5745 {
5746 	int i;
5747 
5748 	for (i = 0; i < sc->sc_nqueues; i++) {
5749 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5750 		wm_free_rx_buffer(sc, rxq);
5751 		wm_free_rx_descs(sc, rxq);
5752 		if (rxq->rxq_lock)
5753 			mutex_obj_free(rxq->rxq_lock);
5754 	}
5755 
5756 	for (i = 0; i < sc->sc_nqueues; i++) {
5757 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5758 		wm_free_tx_buffer(sc, txq);
5759 		wm_free_tx_descs(sc, txq);
5760 		if (txq->txq_lock)
5761 			mutex_obj_free(txq->txq_lock);
5762 	}
5763 
5764 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5765 }
5766 
5767 static void
5768 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5769 {
5770 
5771 	KASSERT(mutex_owned(txq->txq_lock));
5772 
5773 	/* Initialize the transmit descriptor ring. */
5774 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5775 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5776 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5777 	txq->txq_free = WM_NTXDESC(txq);
5778 	txq->txq_next = 0;
5779 }
5780 
5781 static void
5782 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5783     struct wm_txqueue *txq)
5784 {
5785 
5786 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5787 		device_xname(sc->sc_dev), __func__));
5788 	KASSERT(mutex_owned(txq->txq_lock));
5789 
5790 	if (sc->sc_type < WM_T_82543) {
5791 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5792 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5793 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5794 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5795 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5796 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5797 	} else {
5798 		int qid = wmq->wmq_id;
5799 
5800 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5801 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5802 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5803 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5804 
5805 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5806 			/*
5807 			 * Don't write TDT before TCTL.EN is set.
5808 			 * See the datasheet.
5809 			 */
5810 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5811 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5812 			    | TXDCTL_WTHRESH(0));
5813 		else {
5814 			/* ITR / 4 */
5815 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5816 			if (sc->sc_type >= WM_T_82540) {
5817 				/* Should be the same as TIDV */
5818 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5819 			}
5820 
5821 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5822 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5823 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5824 		}
5825 	}
5826 }
5827 
5828 static void
5829 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5830 {
5831 	int i;
5832 
5833 	KASSERT(mutex_owned(txq->txq_lock));
5834 
5835 	/* Initialize the transmit job descriptors. */
5836 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5837 		txq->txq_soft[i].txs_mbuf = NULL;
5838 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5839 	txq->txq_snext = 0;
5840 	txq->txq_sdirty = 0;
5841 }
5842 
5843 static void
5844 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5845     struct wm_txqueue *txq)
5846 {
5847 
5848 	KASSERT(mutex_owned(txq->txq_lock));
5849 
5850 	/*
5851 	 * Set up some register offsets that are different between
5852 	 * the i82542 and the i82543 and later chips.
5853 	 */
5854 	if (sc->sc_type < WM_T_82543)
5855 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5856 	else
5857 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5858 
5859 	wm_init_tx_descs(sc, txq);
5860 	wm_init_tx_regs(sc, wmq, txq);
5861 	wm_init_tx_buffer(sc, txq);
5862 }
5863 
5864 static void
5865 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5866     struct wm_rxqueue *rxq)
5867 {
5868 
5869 	KASSERT(mutex_owned(rxq->rxq_lock));
5870 
5871 	/*
5872 	 * Initialize the receive descriptor and receive job
5873 	 * descriptor rings.
5874 	 */
5875 	if (sc->sc_type < WM_T_82543) {
5876 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
5877 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
5878 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
5879 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
5880 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
5881 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
5882 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
5883 
5884 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
5885 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
5886 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
5887 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
5888 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
5889 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
5890 	} else {
5891 		int qid = wmq->wmq_id;
5892 
5893 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
5894 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
5895 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
5896 
5897 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5898 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
5899 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
5900 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
5901 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
5902 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
5903 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
5904 			    | RXDCTL_WTHRESH(1));
5905 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5906 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5907 		} else {
5908 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
5909 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
5910 			/* ITR / 4 */
5911 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
5912 			/* MUST be the same as RDTR */
5913 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
5914 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
5915 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
5916 		}
5917 	}
5918 }
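
/*
 * SRRCTL sizing sketch for the NEWQUEUE branch above, assuming
 * SRRCTL_BSIZEPKT expresses the buffer size in units of
 * (1 << SRRCTL_BSIZEPKT_SHIFT) bytes: with MCLBYTES = 2048 and a
 * shift of 10, the field value would be 2048 >> 10 = 2 (2KB buffers).
 * The panic() fires when MCLBYTES is not a multiple of the unit size,
 * since such a value cannot be represented exactly in the field.
 */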
5919 
5920 static int
5921 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5922 {
5923 	struct wm_rxsoft *rxs;
5924 	int error, i;
5925 
5926 	KASSERT(mutex_owned(rxq->rxq_lock));
5927 
5928 	for (i = 0; i < WM_NRXDESC; i++) {
5929 		rxs = &rxq->rxq_soft[i];
5930 		if (rxs->rxs_mbuf == NULL) {
5931 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
5932 				log(LOG_ERR, "%s: unable to allocate or map "
5933 				    "rx buffer %d, error = %d\n",
5934 				    device_xname(sc->sc_dev), i, error);
5935 				/*
5936 				 * XXX Should attempt to run with fewer receive
5937 				 * XXX buffers instead of just failing.
5938 				 */
5939 				wm_rxdrain(rxq);
5940 				return ENOMEM;
5941 			}
5942 		} else {
5943 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
5944 				wm_init_rxdesc(rxq, i);
5945 			/*
5946 			 * For 82575 and newer devices, the Rx descriptors
5947 			 * must be initialized after RCTL.EN is set in
5948 			 * wm_set_filter().
5949 			 */
5950 		}
5951 	}
5952 	rxq->rxq_ptr = 0;
5953 	rxq->rxq_discard = 0;
5954 	WM_RXCHAIN_RESET(rxq);
5955 
5956 	return 0;
5957 }
5958 
5959 static int
5960 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5961     struct wm_rxqueue *rxq)
5962 {
5963 
5964 	KASSERT(mutex_owned(rxq->rxq_lock));
5965 
5966 	/*
5967 	 * Set up some register offsets that are different between
5968 	 * the i82542 and the i82543 and later chips.
5969 	 */
5970 	if (sc->sc_type < WM_T_82543)
5971 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
5972 	else
5973 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
5974 
5975 	wm_init_rx_regs(sc, wmq, rxq);
5976 	return wm_init_rx_buffer(sc, rxq);
5977 }
5978 
5979 /*
5980  * wm_init_txrx_queues:
5981  *	Initialize {tx,rx} descriptors and {tx,rx} buffers
5982  */
5983 static int
5984 wm_init_txrx_queues(struct wm_softc *sc)
5985 {
5986 	int i, error = 0;
5987 
5988 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5989 		device_xname(sc->sc_dev), __func__));
5990 
5991 	for (i = 0; i < sc->sc_nqueues; i++) {
5992 		struct wm_queue *wmq = &sc->sc_queue[i];
5993 		struct wm_txqueue *txq = &wmq->wmq_txq;
5994 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5995 
5996 		mutex_enter(txq->txq_lock);
5997 		wm_init_tx_queue(sc, wmq, txq);
5998 		mutex_exit(txq->txq_lock);
5999 
6000 		mutex_enter(rxq->rxq_lock);
6001 		error = wm_init_rx_queue(sc, wmq, rxq);
6002 		mutex_exit(rxq->rxq_lock);
6003 		if (error)
6004 			break;
6005 	}
6006 
6007 	return error;
6008 }
6009 
6010 /*
6011  * wm_tx_offload:
6012  *
6013  *	Set up TCP/IP checksumming parameters for the
6014  *	specified packet.
6015  */
6016 static int
6017 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6018     uint8_t *fieldsp)
6019 {
6020 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6021 	struct mbuf *m0 = txs->txs_mbuf;
6022 	struct livengood_tcpip_ctxdesc *t;
6023 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6024 	uint32_t ipcse;
6025 	struct ether_header *eh;
6026 	int offset, iphl;
6027 	uint8_t fields;
6028 
6029 	/*
6030 	 * XXX It would be nice if the mbuf pkthdr had offset
6031 	 * fields for the protocol headers.
6032 	 */
6033 
6034 	eh = mtod(m0, struct ether_header *);
6035 	switch (ntohs(eh->ether_type)) {
6036 	case ETHERTYPE_IP:
6037 	case ETHERTYPE_IPV6:
6038 		offset = ETHER_HDR_LEN;
6039 		break;
6040 
6041 	case ETHERTYPE_VLAN:
6042 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6043 		break;
6044 
6045 	default:
6046 		/*
6047 		 * Don't support this protocol or encapsulation.
6048 		 */
6049 		*fieldsp = 0;
6050 		*cmdp = 0;
6051 		return 0;
6052 	}
6053 
6054 	if ((m0->m_pkthdr.csum_flags &
6055 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6056 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6057 	} else {
6058 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6059 	}
6060 	ipcse = offset + iphl - 1;
6061 
6062 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6063 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6064 	seg = 0;
6065 	fields = 0;
6066 
6067 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6068 		int hlen = offset + iphl;
6069 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6070 
6071 		if (__predict_false(m0->m_len <
6072 				    (hlen + sizeof(struct tcphdr)))) {
6073 			/*
6074 			 * TCP/IP headers are not in the first mbuf; we need
6075 			 * to do this the slow and painful way.  Let's just
6076 			 * hope this doesn't happen very often.
6077 			 */
6078 			struct tcphdr th;
6079 
6080 			WM_Q_EVCNT_INCR(txq, txtsopain);
6081 
6082 			m_copydata(m0, hlen, sizeof(th), &th);
6083 			if (v4) {
6084 				struct ip ip;
6085 
6086 				m_copydata(m0, offset, sizeof(ip), &ip);
6087 				ip.ip_len = 0;
6088 				m_copyback(m0,
6089 				    offset + offsetof(struct ip, ip_len),
6090 				    sizeof(ip.ip_len), &ip.ip_len);
6091 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6092 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6093 			} else {
6094 				struct ip6_hdr ip6;
6095 
6096 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6097 				ip6.ip6_plen = 0;
6098 				m_copyback(m0,
6099 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6100 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6101 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6102 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6103 			}
6104 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6105 			    sizeof(th.th_sum), &th.th_sum);
6106 
6107 			hlen += th.th_off << 2;
6108 		} else {
6109 			/*
6110 			 * TCP/IP headers are in the first mbuf; we can do
6111 			 * this the easy way.
6112 			 */
6113 			struct tcphdr *th;
6114 
6115 			if (v4) {
6116 				struct ip *ip =
6117 				    (void *)(mtod(m0, char *) + offset);
6118 				th = (void *)(mtod(m0, char *) + hlen);
6119 
6120 				ip->ip_len = 0;
6121 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6122 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6123 			} else {
6124 				struct ip6_hdr *ip6 =
6125 				    (void *)(mtod(m0, char *) + offset);
6126 				th = (void *)(mtod(m0, char *) + hlen);
6127 
6128 				ip6->ip6_plen = 0;
6129 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6130 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6131 			}
6132 			hlen += th->th_off << 2;
6133 		}
6134 
6135 		if (v4) {
6136 			WM_Q_EVCNT_INCR(txq, txtso);
6137 			cmdlen |= WTX_TCPIP_CMD_IP;
6138 		} else {
6139 			WM_Q_EVCNT_INCR(txq, txtso6);
6140 			ipcse = 0;
6141 		}
6142 		cmd |= WTX_TCPIP_CMD_TSE;
6143 		cmdlen |= WTX_TCPIP_CMD_TSE |
6144 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6145 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6146 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6147 	}
6148 
6149 	/*
6150 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6151 	 * offload feature, if we load the context descriptor, we
6152 	 * MUST provide valid values for IPCSS and TUCSS fields.
6153 	 */
6154 
6155 	ipcs = WTX_TCPIP_IPCSS(offset) |
6156 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6157 	    WTX_TCPIP_IPCSE(ipcse);
6158 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6159 		WM_Q_EVCNT_INCR(txq, txipsum);
6160 		fields |= WTX_IXSM;
6161 	}
6162 
6163 	offset += iphl;
6164 
6165 	if (m0->m_pkthdr.csum_flags &
6166 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6167 		WM_Q_EVCNT_INCR(txq, txtusum);
6168 		fields |= WTX_TXSM;
6169 		tucs = WTX_TCPIP_TUCSS(offset) |
6170 		    WTX_TCPIP_TUCSO(offset +
6171 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6172 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6173 	} else if ((m0->m_pkthdr.csum_flags &
6174 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6175 		WM_Q_EVCNT_INCR(txq, txtusum6);
6176 		fields |= WTX_TXSM;
6177 		tucs = WTX_TCPIP_TUCSS(offset) |
6178 		    WTX_TCPIP_TUCSO(offset +
6179 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6180 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6181 	} else {
6182 		/* Just initialize it to a valid TCP context. */
6183 		tucs = WTX_TCPIP_TUCSS(offset) |
6184 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6185 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6186 	}
6187 
6188 	/* Fill in the context descriptor. */
6189 	t = (struct livengood_tcpip_ctxdesc *)
6190 	    &txq->txq_descs[txq->txq_next];
6191 	t->tcpip_ipcs = htole32(ipcs);
6192 	t->tcpip_tucs = htole32(tucs);
6193 	t->tcpip_cmdlen = htole32(cmdlen);
6194 	t->tcpip_seg = htole32(seg);
6195 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6196 
6197 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6198 	txs->txs_ndesc++;
6199 
6200 	*cmdp = cmd;
6201 	*fieldsp = fields;
6202 
6203 	return 0;
6204 }
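
/*
 * Worked example for wm_tx_offload() with a plain IPv4/TCP frame
 * (illustrative; assumes ETHER_HDR_LEN = 14 and a 20-byte IP header):
 * offset = 14 and iphl = 20, so ipcse = 14 + 20 - 1 = 33.  Then
 * IPCSS = 14, IPCSO = 14 + offsetof(struct ip, ip_sum) = 24, and
 * after "offset += iphl", TUCSS = 34 and
 * TUCSO = 34 + offsetof(struct tcphdr, th_sum) = 50.  TUCSE = 0
 * means "checksum to the end of the packet".
 */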
6205 
6206 /*
6207  * wm_start:		[ifnet interface function]
6208  *
6209  *	Start packet transmission on the interface.
6210  */
6211 static void
6212 wm_start(struct ifnet *ifp)
6213 {
6214 	struct wm_softc *sc = ifp->if_softc;
6215 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6216 
6217 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6218 
6219 	mutex_enter(txq->txq_lock);
6220 	if (!sc->sc_stopping)
6221 		wm_start_locked(ifp);
6222 	mutex_exit(txq->txq_lock);
6223 }
6224 
6225 static void
6226 wm_start_locked(struct ifnet *ifp)
6227 {
6228 	struct wm_softc *sc = ifp->if_softc;
6229 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6230 	struct mbuf *m0;
6231 	struct m_tag *mtag;
6232 	struct wm_txsoft *txs;
6233 	bus_dmamap_t dmamap;
6234 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6235 	bus_addr_t curaddr;
6236 	bus_size_t seglen, curlen;
6237 	uint32_t cksumcmd;
6238 	uint8_t cksumfields;
6239 
6240 	KASSERT(mutex_owned(txq->txq_lock));
6241 
6242 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6243 		return;
6244 
6245 	/* Remember the previous number of free descriptors. */
6246 	ofree = txq->txq_free;
6247 
6248 	/*
6249 	 * Loop through the send queue, setting up transmit descriptors
6250 	 * until we drain the queue, or use up all available transmit
6251 	 * descriptors.
6252 	 */
6253 	for (;;) {
6254 		m0 = NULL;
6255 
6256 		/* Get a work queue entry. */
6257 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6258 			wm_txeof(sc, txq);
6259 			if (txq->txq_sfree == 0) {
6260 				DPRINTF(WM_DEBUG_TX,
6261 				    ("%s: TX: no free job descriptors\n",
6262 					device_xname(sc->sc_dev)));
6263 				WM_Q_EVCNT_INCR(txq, txsstall);
6264 				break;
6265 			}
6266 		}
6267 
6268 		/* Grab a packet off the queue. */
6269 		IFQ_DEQUEUE(&ifp->if_snd, m0);
6270 		if (m0 == NULL)
6271 			break;
6272 
6273 		DPRINTF(WM_DEBUG_TX,
6274 		    ("%s: TX: have packet to transmit: %p\n",
6275 		    device_xname(sc->sc_dev), m0));
6276 
6277 		txs = &txq->txq_soft[txq->txq_snext];
6278 		dmamap = txs->txs_dmamap;
6279 
6280 		use_tso = (m0->m_pkthdr.csum_flags &
6281 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6282 
6283 		/*
6284 		 * So says the Linux driver:
6285 		 * The controller does a simple calculation to make sure
6286 		 * there is enough room in the FIFO before initiating the
6287 		 * DMA for each buffer.  The calc is:
6288 		 *	4 = ceil(buffer len / MSS)
6289 		 * To make sure we don't overrun the FIFO, adjust the max
6290 		 * buffer len if the MSS drops.
6291 		 */
6292 		dmamap->dm_maxsegsz =
6293 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6294 		    ? m0->m_pkthdr.segsz << 2
6295 		    : WTX_MAX_LEN;
6296 
6297 		/*
6298 		 * Load the DMA map.  If this fails, the packet either
6299 		 * didn't fit in the allotted number of segments, or we
6300 		 * were short on resources.  For the too-many-segments
6301 		 * case, we simply report an error and drop the packet,
6302 		 * since we can't sanely copy a jumbo packet to a single
6303 		 * buffer.
6304 		 */
6305 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6306 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6307 		if (error) {
6308 			if (error == EFBIG) {
6309 				WM_Q_EVCNT_INCR(txq, txdrop);
6310 				log(LOG_ERR, "%s: Tx packet consumes too many "
6311 				    "DMA segments, dropping...\n",
6312 				    device_xname(sc->sc_dev));
6313 				wm_dump_mbuf_chain(sc, m0);
6314 				m_freem(m0);
6315 				continue;
6316 			}
6317 			/* Short on resources, just stop for now. */
6318 			DPRINTF(WM_DEBUG_TX,
6319 			    ("%s: TX: dmamap load failed: %d\n",
6320 			    device_xname(sc->sc_dev), error));
6321 			break;
6322 		}
6323 
6324 		segs_needed = dmamap->dm_nsegs;
6325 		if (use_tso) {
6326 			/* For sentinel descriptor; see below. */
6327 			segs_needed++;
6328 		}
6329 
6330 		/*
6331 		 * Ensure we have enough descriptors free to describe
6332 		 * the packet.  Note, we always reserve one descriptor
6333 		 * at the end of the ring due to the semantics of the
6334 		 * TDT register, plus one more in the event we need
6335 		 * to load offload context.
6336 		 */
6337 		if (segs_needed > txq->txq_free - 2) {
6338 			/*
6339 			 * Not enough free descriptors to transmit this
6340 			 * packet.  We haven't committed anything yet,
6341 			 * so just unload the DMA map, put the packet
6342 			 * back on the queue, and punt.  Notify the upper
6343 			 * layer that there are no more slots left.
6344 			 */
6345 			DPRINTF(WM_DEBUG_TX,
6346 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6347 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6348 			    segs_needed, txq->txq_free - 1));
6349 			ifp->if_flags |= IFF_OACTIVE;
6350 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6351 			WM_Q_EVCNT_INCR(txq, txdstall);
6352 			break;
6353 		}
6354 
6355 		/*
6356 		 * Check for 82547 Tx FIFO bug.  We need to do this
6357 		 * once we know we can transmit the packet, since we
6358 		 * do some internal FIFO space accounting here.
6359 		 */
6360 		if (sc->sc_type == WM_T_82547 &&
6361 		    wm_82547_txfifo_bugchk(sc, m0)) {
6362 			DPRINTF(WM_DEBUG_TX,
6363 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6364 			    device_xname(sc->sc_dev)));
6365 			ifp->if_flags |= IFF_OACTIVE;
6366 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6367 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
6368 			break;
6369 		}
6370 
6371 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6372 
6373 		DPRINTF(WM_DEBUG_TX,
6374 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6375 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6376 
6377 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6378 
6379 		/*
6380 		 * Store a pointer to the packet so that we can free it
6381 		 * later.
6382 		 *
6383 		 * Initially, we consider the number of descriptors the
6384 		 * packet uses the number of DMA segments.  This may be
6385 		 * incremented by 1 if we do checksum offload (a descriptor
6386 		 * is used to set the checksum context).
6387 		 */
6388 		txs->txs_mbuf = m0;
6389 		txs->txs_firstdesc = txq->txq_next;
6390 		txs->txs_ndesc = segs_needed;
6391 
6392 		/* Set up offload parameters for this packet. */
6393 		if (m0->m_pkthdr.csum_flags &
6394 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6395 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6396 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6397 			if (wm_tx_offload(sc, txs, &cksumcmd,
6398 					  &cksumfields) != 0) {
6399 				/* Error message already displayed. */
6400 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6401 				continue;
6402 			}
6403 		} else {
6404 			cksumcmd = 0;
6405 			cksumfields = 0;
6406 		}
6407 
6408 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6409 
6410 		/* Sync the DMA map. */
6411 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6412 		    BUS_DMASYNC_PREWRITE);
6413 
6414 		/* Initialize the transmit descriptor. */
6415 		for (nexttx = txq->txq_next, seg = 0;
6416 		     seg < dmamap->dm_nsegs; seg++) {
6417 			for (seglen = dmamap->dm_segs[seg].ds_len,
6418 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6419 			     seglen != 0;
6420 			     curaddr += curlen, seglen -= curlen,
6421 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6422 				curlen = seglen;
6423 
6424 				/*
6425 				 * So says the Linux driver:
6426 				 * Work around for premature descriptor
6427 				 * write-backs in TSO mode.  Append a
6428 				 * 4-byte sentinel descriptor.
6429 				 */
6430 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6431 				    curlen > 8)
6432 					curlen -= 4;
6433 
6434 				wm_set_dma_addr(
6435 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6436 				txq->txq_descs[nexttx].wtx_cmdlen
6437 				    = htole32(cksumcmd | curlen);
6438 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6439 				    = 0;
6440 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6441 				    = cksumfields;
6442 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6443 				lasttx = nexttx;
6444 
6445 				DPRINTF(WM_DEBUG_TX,
6446 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6447 				     "len %#04zx\n",
6448 				    device_xname(sc->sc_dev), nexttx,
6449 				    (uint64_t)curaddr, curlen));
6450 			}
6451 		}
6452 
6453 		KASSERT(lasttx != -1);
6454 
6455 		/*
6456 		 * Set up the command byte on the last descriptor of
6457 		 * the packet.  If we're in the interrupt delay window,
6458 		 * delay the interrupt.
6459 		 */
6460 		txq->txq_descs[lasttx].wtx_cmdlen |=
6461 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6462 
6463 		/*
6464 		 * If VLANs are enabled and the packet has a VLAN tag, set
6465 		 * up the descriptor to encapsulate the packet for us.
6466 		 *
6467 		 * This is only valid on the last descriptor of the packet.
6468 		 */
6469 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6470 			txq->txq_descs[lasttx].wtx_cmdlen |=
6471 			    htole32(WTX_CMD_VLE);
6472 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6473 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6474 		}
6475 
6476 		txs->txs_lastdesc = lasttx;
6477 
6478 		DPRINTF(WM_DEBUG_TX,
6479 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6480 		    device_xname(sc->sc_dev),
6481 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6482 
6483 		/* Sync the descriptors we're using. */
6484 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6485 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6486 
6487 		/* Give the packet to the chip. */
6488 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6489 
6490 		DPRINTF(WM_DEBUG_TX,
6491 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6492 
6493 		DPRINTF(WM_DEBUG_TX,
6494 		    ("%s: TX: finished transmitting packet, job %d\n",
6495 		    device_xname(sc->sc_dev), txq->txq_snext));
6496 
6497 		/* Advance the tx pointer. */
6498 		txq->txq_free -= txs->txs_ndesc;
6499 		txq->txq_next = nexttx;
6500 
6501 		txq->txq_sfree--;
6502 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6503 
6504 		/* Pass the packet to any BPF listeners. */
6505 		bpf_mtap(ifp, m0);
6506 	}
6507 
6508 	if (m0 != NULL) {
6509 		ifp->if_flags |= IFF_OACTIVE;
6510 		WM_Q_EVCNT_INCR(txq, txdrop);
6511 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6512 			__func__));
6513 		m_freem(m0);
6514 	}
6515 
6516 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6517 		/* No more slots; notify upper layer. */
6518 		ifp->if_flags |= IFF_OACTIVE;
6519 	}
6520 
6521 	if (txq->txq_free != ofree) {
6522 		/* Set a watchdog timer in case the chip flakes out. */
6523 		ifp->if_timer = 5;
6524 	}
6525 }
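
/*
 * Descriptor accounting note for wm_start_locked() above: the
 * "txq_free - 2" test keeps two descriptors in reserve -- one
 * because the TDT semantics never allow the ring to be completely
 * full, and one for a possible checksum/TSO context descriptor
 * (txs_ndesc is incremented in wm_tx_offload() when a context
 * descriptor is consumed).
 */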
6526 
6527 /*
6528  * wm_nq_tx_offload:
6529  *
6530  *	Set up TCP/IP checksumming parameters for the
6531  *	specified packet, for NEWQUEUE devices
6532  */
6533 static int
6534 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6535     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6536 {
6537 	struct mbuf *m0 = txs->txs_mbuf;
6538 	struct m_tag *mtag;
6539 	uint32_t vl_len, mssidx, cmdc;
6540 	struct ether_header *eh;
6541 	int offset, iphl;
6542 
6543 	/*
6544 	 * XXX It would be nice if the mbuf pkthdr had offset
6545 	 * fields for the protocol headers.
6546 	 */
6547 	*cmdlenp = 0;
6548 	*fieldsp = 0;
6549 
6550 	eh = mtod(m0, struct ether_header *);
6551 	switch (ntohs(eh->ether_type)) {
6552 	case ETHERTYPE_IP:
6553 	case ETHERTYPE_IPV6:
6554 		offset = ETHER_HDR_LEN;
6555 		break;
6556 
6557 	case ETHERTYPE_VLAN:
6558 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6559 		break;
6560 
6561 	default:
6562 		/* Don't support this protocol or encapsulation. */
6563 		*do_csum = false;
6564 		return 0;
6565 	}
6566 	*do_csum = true;
6567 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6568 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6569 
6570 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6571 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6572 
6573 	if ((m0->m_pkthdr.csum_flags &
6574 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6575 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6576 	} else {
6577 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6578 	}
6579 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6580 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6581 
6582 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6583 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6584 		     << NQTXC_VLLEN_VLAN_SHIFT);
6585 		*cmdlenp |= NQTX_CMD_VLE;
6586 	}
6587 
6588 	mssidx = 0;
6589 
6590 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6591 		int hlen = offset + iphl;
6592 		int tcp_hlen;
6593 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6594 
6595 		if (__predict_false(m0->m_len <
6596 				    (hlen + sizeof(struct tcphdr)))) {
6597 			/*
6598 			 * TCP/IP headers are not in the first mbuf; we need
6599 			 * to do this the slow and painful way.  Let's just
6600 			 * hope this doesn't happen very often.
6601 			 */
6602 			struct tcphdr th;
6603 
6604 			WM_Q_EVCNT_INCR(txq, txtsopain);
6605 
6606 			m_copydata(m0, hlen, sizeof(th), &th);
6607 			if (v4) {
6608 				struct ip ip;
6609 
6610 				m_copydata(m0, offset, sizeof(ip), &ip);
6611 				ip.ip_len = 0;
6612 				m_copyback(m0,
6613 				    offset + offsetof(struct ip, ip_len),
6614 				    sizeof(ip.ip_len), &ip.ip_len);
6615 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6616 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6617 			} else {
6618 				struct ip6_hdr ip6;
6619 
6620 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6621 				ip6.ip6_plen = 0;
6622 				m_copyback(m0,
6623 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6624 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6625 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6626 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6627 			}
6628 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6629 			    sizeof(th.th_sum), &th.th_sum);
6630 
6631 			tcp_hlen = th.th_off << 2;
6632 		} else {
6633 			/*
6634 			 * TCP/IP headers are in the first mbuf; we can do
6635 			 * this the easy way.
6636 			 */
6637 			struct tcphdr *th;
6638 
6639 			if (v4) {
6640 				struct ip *ip =
6641 				    (void *)(mtod(m0, char *) + offset);
6642 				th = (void *)(mtod(m0, char *) + hlen);
6643 
6644 				ip->ip_len = 0;
6645 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6646 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6647 			} else {
6648 				struct ip6_hdr *ip6 =
6649 				    (void *)(mtod(m0, char *) + offset);
6650 				th = (void *)(mtod(m0, char *) + hlen);
6651 
6652 				ip6->ip6_plen = 0;
6653 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6654 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6655 			}
6656 			tcp_hlen = th->th_off << 2;
6657 		}
6658 		hlen += tcp_hlen;
6659 		*cmdlenp |= NQTX_CMD_TSE;
6660 
6661 		if (v4) {
6662 			WM_Q_EVCNT_INCR(txq, txtso);
6663 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6664 		} else {
6665 			WM_Q_EVCNT_INCR(txq, txtso6);
6666 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6667 		}
6668 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6669 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6670 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6671 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6672 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6673 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6674 	} else {
6675 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6676 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6677 	}
6678 
6679 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6680 		*fieldsp |= NQTXD_FIELDS_IXSM;
6681 		cmdc |= NQTXC_CMD_IP4;
6682 	}
6683 
6684 	if (m0->m_pkthdr.csum_flags &
6685 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6686 		WM_Q_EVCNT_INCR(txq, txtusum);
6687 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6688 			cmdc |= NQTXC_CMD_TCP;
6689 		} else {
6690 			cmdc |= NQTXC_CMD_UDP;
6691 		}
6692 		cmdc |= NQTXC_CMD_IP4;
6693 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6694 	}
6695 	if (m0->m_pkthdr.csum_flags &
6696 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6697 		WM_Q_EVCNT_INCR(txq, txtusum6);
6698 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6699 			cmdc |= NQTXC_CMD_TCP;
6700 		} else {
6701 			cmdc |= NQTXC_CMD_UDP;
6702 		}
6703 		cmdc |= NQTXC_CMD_IP6;
6704 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6705 	}
6706 
6707 	/* Fill in the context descriptor. */
6708 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6709 	    htole32(vl_len);
6710 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6711 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6712 	    htole32(cmdc);
6713 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6714 	    htole32(mssidx);
6715 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6716 	DPRINTF(WM_DEBUG_TX,
6717 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6718 	    txq->txq_next, 0, vl_len));
6719 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6720 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6721 	txs->txs_ndesc++;
6722 	return 0;
6723 }
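
/*
 * Sketch of the NQ context-descriptor packing above for a plain
 * IPv4/TCP frame (illustrative; the exact shifts and masks live in
 * wmreg.h): vl_len packs MACLEN = 14, IPLEN = 20 and the optional
 * VLAN tag into separate bit fields, while mssidx carries the MSS
 * and the TCP header length for TSO.  The KASSERTs after each field
 * verify that the value fits within its field's mask.
 */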
6724 
6725 /*
6726  * wm_nq_start:		[ifnet interface function]
6727  *
6728  *	Start packet transmission on the interface for NEWQUEUE devices
6729  */
6730 static void
6731 wm_nq_start(struct ifnet *ifp)
6732 {
6733 	struct wm_softc *sc = ifp->if_softc;
6734 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6735 
6736 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6737 
6738 	mutex_enter(txq->txq_lock);
6739 	if (!sc->sc_stopping)
6740 		wm_nq_start_locked(ifp);
6741 	mutex_exit(txq->txq_lock);
6742 }
6743 
6744 static void
6745 wm_nq_start_locked(struct ifnet *ifp)
6746 {
6747 	struct wm_softc *sc = ifp->if_softc;
6748 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6749 
6750 	wm_nq_send_common_locked(ifp, txq, false);
6751 }
6752 
6753 static inline int
6754 wm_nq_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6755 {
6756 	struct wm_softc *sc = ifp->if_softc;
6757 	u_int cpuid = cpu_index(curcpu());
6758 
6759 	/*
6760 	 * Currently, a simple distribution strategy.
6761 	 * TODO:
6762 	 * Distribute by flow ID (RSS hash value).
6763 	 */
6764 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6765 }
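
/*
 * Example of the mapping above: with sc_nqueues = 2 and
 * sc_affinity_offset = 0, CPU 0 -> queue 0, CPU 1 -> queue 1,
 * CPU 2 -> queue 0, and so on; the offset merely rotates which queue
 * the first CPU lands on.
 */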
6766 
6767 static int
6768 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6769 {
6770 	int qid;
6771 	struct wm_softc *sc = ifp->if_softc;
6772 	struct wm_txqueue *txq;
6773 
6774 	qid = wm_nq_select_txqueue(ifp, m);
6775 	txq = &sc->sc_queue[qid].wmq_txq;
6776 
6777 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6778 		m_freem(m);
6779 		WM_Q_EVCNT_INCR(txq, txdrop);
6780 		return ENOBUFS;
6781 	}
6782 
6783 	if (mutex_tryenter(txq->txq_lock)) {
6784 		/* XXX: Should be per Tx queue. */
6785 		ifp->if_obytes += m->m_pkthdr.len;
6786 		if (m->m_flags & M_MCAST)
6787 			ifp->if_omcasts++;
6788 
6789 		if (!sc->sc_stopping)
6790 			wm_nq_transmit_locked(ifp, txq);
6791 		mutex_exit(txq->txq_lock);
6792 	}
6793 
6794 	return 0;
6795 }
6796 
6797 static void
6798 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6799 {
6800 
6801 	wm_nq_send_common_locked(ifp, txq, true);
6802 }
6803 
6804 static void
6805 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6806     bool is_transmit)
6807 {
6808 	struct wm_softc *sc = ifp->if_softc;
6809 	struct mbuf *m0;
6810 	struct m_tag *mtag;
6811 	struct wm_txsoft *txs;
6812 	bus_dmamap_t dmamap;
6813 	int error, nexttx, lasttx = -1, seg, segs_needed;
6814 	bool do_csum, sent;
6815 
6816 	KASSERT(mutex_owned(txq->txq_lock));
6817 
6818 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6819 		return;
6820 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
6821 		return;
6822 
6823 	sent = false;
6824 
6825 	/*
6826 	 * Loop through the send queue, setting up transmit descriptors
6827 	 * until we drain the queue, or use up all available transmit
6828 	 * descriptors.
6829 	 */
6830 	for (;;) {
6831 		m0 = NULL;
6832 
6833 		/* Get a work queue entry. */
6834 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6835 			wm_txeof(sc, txq);
6836 			if (txq->txq_sfree == 0) {
6837 				DPRINTF(WM_DEBUG_TX,
6838 				    ("%s: TX: no free job descriptors\n",
6839 					device_xname(sc->sc_dev)));
6840 				WM_Q_EVCNT_INCR(txq, txsstall);
6841 				break;
6842 			}
6843 		}
6844 
6845 		/* Grab a packet off the queue. */
6846 		if (is_transmit)
6847 			m0 = pcq_get(txq->txq_interq);
6848 		else
6849 			IFQ_DEQUEUE(&ifp->if_snd, m0);
6850 		if (m0 == NULL)
6851 			break;
6852 
6853 		DPRINTF(WM_DEBUG_TX,
6854 		    ("%s: TX: have packet to transmit: %p\n",
6855 		    device_xname(sc->sc_dev), m0));
6856 
6857 		txs = &txq->txq_soft[txq->txq_snext];
6858 		dmamap = txs->txs_dmamap;
6859 
6860 		/*
6861 		 * Load the DMA map.  If this fails, the packet either
6862 		 * didn't fit in the allotted number of segments, or we
6863 		 * were short on resources.  For the too-many-segments
6864 		 * case, we simply report an error and drop the packet,
6865 		 * since we can't sanely copy a jumbo packet to a single
6866 		 * buffer.
6867 		 */
6868 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6869 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6870 		if (error) {
6871 			if (error == EFBIG) {
6872 				WM_Q_EVCNT_INCR(txq, txdrop);
6873 				log(LOG_ERR, "%s: Tx packet consumes too many "
6874 				    "DMA segments, dropping...\n",
6875 				    device_xname(sc->sc_dev));
6876 				wm_dump_mbuf_chain(sc, m0);
6877 				m_freem(m0);
6878 				continue;
6879 			}
6880 			/* Short on resources, just stop for now. */
6881 			DPRINTF(WM_DEBUG_TX,
6882 			    ("%s: TX: dmamap load failed: %d\n",
6883 			    device_xname(sc->sc_dev), error));
6884 			break;
6885 		}
6886 
6887 		segs_needed = dmamap->dm_nsegs;
6888 
6889 		/*
6890 		 * Ensure we have enough descriptors free to describe
6891 		 * the packet.  Note, we always reserve one descriptor
6892 		 * at the end of the ring due to the semantics of the
6893 		 * TDT register, plus one more in the event we need
6894 		 * to load offload context.
6895 		 */
6896 		if (segs_needed > txq->txq_free - 2) {
6897 			/*
6898 			 * Not enough free descriptors to transmit this
6899 			 * packet.  We haven't committed anything yet,
6900 			 * so just unload the DMA map, put the packet
6901 			 * back on the queue, and punt.  Notify the upper
6902 			 * layer that there are no more slots left.
6903 			 */
6904 			DPRINTF(WM_DEBUG_TX,
6905 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6906 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6907 			    segs_needed, txq->txq_free - 1));
6908 			txq->txq_flags |= WM_TXQ_NO_SPACE;
6909 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6910 			WM_Q_EVCNT_INCR(txq, txdstall);
6911 			break;
6912 		}
6913 
6914 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6915 
6916 		DPRINTF(WM_DEBUG_TX,
6917 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6918 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6919 
6920 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6921 
6922 		/*
6923 		 * Store a pointer to the packet so that we can free it
6924 		 * later.
6925 		 *
6926 		 * Initially, we consider the number of descriptors the
6927 		 * packet uses to be the number of DMA segments.  This may be
6928 		 * incremented by 1 if we do checksum offload (a descriptor
6929 		 * is used to set the checksum context).
6930 		 */
6931 		txs->txs_mbuf = m0;
6932 		txs->txs_firstdesc = txq->txq_next;
6933 		txs->txs_ndesc = segs_needed;
6934 
6935 		/* Set up offload parameters for this packet. */
6936 		uint32_t cmdlen, fields, dcmdlen;
6937 		if (m0->m_pkthdr.csum_flags &
6938 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6939 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6940 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6941 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
6942 			    &do_csum) != 0) {
6943 				/* Error message already displayed. */
6944 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6945 				continue;
6946 			}
6947 		} else {
6948 			do_csum = false;
6949 			cmdlen = 0;
6950 			fields = 0;
6951 		}
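		/*
		 * When offload context is in use, the packet is described
		 * with advanced (NQ) data descriptors below; otherwise the
		 * first descriptor uses the legacy layout.
		 */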
6952 
6953 		/* Sync the DMA map. */
6954 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6955 		    BUS_DMASYNC_PREWRITE);
6956 
6957 		/* Initialize the first transmit descriptor. */
6958 		nexttx = txq->txq_next;
6959 		if (!do_csum) {
6960 			/* Set up a legacy descriptor. */
6961 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
6962 			    dmamap->dm_segs[0].ds_addr);
6963 			txq->txq_descs[nexttx].wtx_cmdlen =
6964 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
6965 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
6966 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
6967 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
6968 			    NULL) {
6969 				txq->txq_descs[nexttx].wtx_cmdlen |=
6970 				    htole32(WTX_CMD_VLE);
6971 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
6972 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6973 			} else {
6974 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan = 0;
6975 			}
6976 			dcmdlen = 0;
6977 		} else {
6978 			/* Set up an advanced data descriptor. */
6979 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
6980 			    htole64(dmamap->dm_segs[0].ds_addr);
6981 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
6982 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
6983 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
6984 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
6985 			    htole32(fields);
6986 			DPRINTF(WM_DEBUG_TX,
6987 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
6988 			    device_xname(sc->sc_dev), nexttx,
6989 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
6990 			DPRINTF(WM_DEBUG_TX,
6991 			    ("\t 0x%08x%08x\n", fields,
6992 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
6993 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
6994 		}
6995 
6996 		lasttx = nexttx;
6997 		nexttx = WM_NEXTTX(txq, nexttx);
6998 		/*
6999 		 * Fill in the remaining descriptors.  The legacy and
7000 		 * advanced descriptor formats are identical from here on.
7001 		 */
7002 		for (seg = 1; seg < dmamap->dm_nsegs;
7003 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7004 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7005 			    htole64(dmamap->dm_segs[seg].ds_addr);
7006 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7007 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7008 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7009 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7010 			lasttx = nexttx;
7011 
7012 			DPRINTF(WM_DEBUG_TX,
7013 			    ("%s: TX: desc %d: %#" PRIx64 ", "
7014 			     "len %#04zx\n",
7015 			    device_xname(sc->sc_dev), nexttx,
7016 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
7017 			    dmamap->dm_segs[seg].ds_len));
7018 		}
7019 
7020 		KASSERT(lasttx != -1);
7021 
7022 		/*
7023 		 * Set up the command byte on the last descriptor of
7024 		 * the packet.  If we're in the interrupt delay window,
7025 		 * delay the interrupt.
7026 		 */
7027 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7028 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7029 		txq->txq_descs[lasttx].wtx_cmdlen |=
7030 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7031 
7032 		txs->txs_lastdesc = lasttx;
7033 
7034 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7035 		    device_xname(sc->sc_dev),
7036 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7037 
7038 		/* Sync the descriptors we're using. */
7039 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7040 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7041 
7042 		/* Give the packet to the chip. */
7043 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7044 		sent = true;
7045 
7046 		DPRINTF(WM_DEBUG_TX,
7047 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7048 
7049 		DPRINTF(WM_DEBUG_TX,
7050 		    ("%s: TX: finished transmitting packet, job %d\n",
7051 		    device_xname(sc->sc_dev), txq->txq_snext));
7052 
7053 		/* Advance the tx pointer. */
7054 		txq->txq_free -= txs->txs_ndesc;
7055 		txq->txq_next = nexttx;
7056 
7057 		txq->txq_sfree--;
7058 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7059 
7060 		/* Pass the packet to any BPF listeners. */
7061 		bpf_mtap(ifp, m0);
7062 	}
7063 
7064 	if (m0 != NULL) {
7065 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7066 		WM_Q_EVCNT_INCR(txq, txdrop);
7067 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7068 			__func__));
7069 		m_freem(m0);
7070 	}
7071 
7072 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7073 		/* No more slots; notify upper layer. */
7074 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7075 	}
7076 
7077 	if (sent) {
7078 		/* Set a watchdog timer in case the chip flakes out. */
7079 		ifp->if_timer = 5;
7080 	}
7081 }
7082 
7083 /* Interrupt */
7084 
7085 /*
7086  * wm_txeof:
7087  *
7088  *	Helper; handle transmit interrupts.
7089  */
7090 static int
7091 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7092 {
7093 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7094 	struct wm_txsoft *txs;
7095 	bool processed = false;
7096 	int count = 0;
7097 	int i;
7098 	uint8_t status;
7099 
7100 	KASSERT(mutex_owned(txq->txq_lock));
7101 
7102 	if (sc->sc_stopping)
7103 		return 0;
7104 
7105 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7106 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7107 	else
7108 		ifp->if_flags &= ~IFF_OACTIVE;
7109 
7110 	/*
7111 	 * Go through the Tx list and free mbufs for those
7112 	 * frames which have been transmitted.
7113 	 */
7114 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7115 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7116 		txs = &txq->txq_soft[i];
7117 
7118 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7119 			device_xname(sc->sc_dev), i));
7120 
7121 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7122 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7123 
7124 		status =
7125 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7126 		if ((status & WTX_ST_DD) == 0) {
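			/*
			 * Not yet done (DD clear): sync the descriptor
			 * back for the next poll and stop scanning.
			 */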
7127 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7128 			    BUS_DMASYNC_PREREAD);
7129 			break;
7130 		}
7131 
7132 		processed = true;
7133 		count++;
7134 		DPRINTF(WM_DEBUG_TX,
7135 		    ("%s: TX: job %d done: descs %d..%d\n",
7136 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7137 		    txs->txs_lastdesc));
7138 
7139 		/*
7140 		 * XXX We should probably be using the statistics
7141 		 * XXX registers, but I don't know if they exist
7142 		 * XXX on chips before the i82544.
7143 		 */
7144 
7145 #ifdef WM_EVENT_COUNTERS
7146 		if (status & WTX_ST_TU)
7147 			WM_Q_EVCNT_INCR(txq, tu);
7148 #endif /* WM_EVENT_COUNTERS */
7149 
7150 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
7151 			ifp->if_oerrors++;
7152 			if (status & WTX_ST_LC)
7153 				log(LOG_WARNING, "%s: late collision\n",
7154 				    device_xname(sc->sc_dev));
7155 			else if (status & WTX_ST_EC) {
7156 				ifp->if_collisions += 16;
7157 				log(LOG_WARNING, "%s: excessive collisions\n",
7158 				    device_xname(sc->sc_dev));
7159 			}
7160 		} else
7161 			ifp->if_opackets++;
7162 
7163 		txq->txq_free += txs->txs_ndesc;
7164 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7165 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7166 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7167 		m_freem(txs->txs_mbuf);
7168 		txs->txs_mbuf = NULL;
7169 	}
7170 
7171 	/* Update the dirty transmit buffer pointer. */
7172 	txq->txq_sdirty = i;
7173 	DPRINTF(WM_DEBUG_TX,
7174 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7175 
7176 	if (count != 0)
7177 		rnd_add_uint32(&sc->rnd_source, count);
7178 
7179 	/*
7180 	 * If there are no more pending transmissions, cancel the watchdog
7181 	 * timer.
7182 	 */
7183 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7184 		ifp->if_timer = 0;
7185 
7186 	return processed;
7187 }
7188 
7189 /*
7190  * wm_rxeof:
7191  *
7192  *	Helper; handle receive interrupts.
7193  */
7194 static void
7195 wm_rxeof(struct wm_rxqueue *rxq)
7196 {
7197 	struct wm_softc *sc = rxq->rxq_sc;
7198 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7199 	struct wm_rxsoft *rxs;
7200 	struct mbuf *m;
7201 	int i, len;
7202 	int count = 0;
7203 	uint8_t status, errors;
7204 	uint16_t vlantag;
7205 
7206 	KASSERT(mutex_owned(rxq->rxq_lock));
7207 
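	/*
	 * Scan the ring from where the previous call left off; the first
	 * descriptor without the DD (descriptor done) bit set ends the
	 * scan.
	 */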
7208 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7209 		rxs = &rxq->rxq_soft[i];
7210 
7211 		DPRINTF(WM_DEBUG_RX,
7212 		    ("%s: RX: checking descriptor %d\n",
7213 		    device_xname(sc->sc_dev), i));
7214 
7215 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7216 
7217 		status = rxq->rxq_descs[i].wrx_status;
7218 		errors = rxq->rxq_descs[i].wrx_errors;
7219 		len = le16toh(rxq->rxq_descs[i].wrx_len);
7220 		vlantag = rxq->rxq_descs[i].wrx_special;
7221 
7222 		if ((status & WRX_ST_DD) == 0) {
7223 			/* We have processed all of the receive descriptors. */
7224 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7225 			break;
7226 		}
7227 
7228 		count++;
7229 		if (__predict_false(rxq->rxq_discard)) {
7230 			DPRINTF(WM_DEBUG_RX,
7231 			    ("%s: RX: discarding contents of descriptor %d\n",
7232 			    device_xname(sc->sc_dev), i));
7233 			wm_init_rxdesc(rxq, i);
7234 			if (status & WRX_ST_EOP) {
7235 				/* Reset our state. */
7236 				DPRINTF(WM_DEBUG_RX,
7237 				    ("%s: RX: resetting rxdiscard -> 0\n",
7238 				    device_xname(sc->sc_dev)));
7239 				rxq->rxq_discard = 0;
7240 			}
7241 			continue;
7242 		}
7243 
7244 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7245 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7246 
7247 		m = rxs->rxs_mbuf;
7248 
7249 		/*
7250 		 * Add a new receive buffer to the ring, unless of
7251 		 * course the length is zero. Treat the latter as a
7252 		 * failed mapping.
7253 		 */
7254 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7255 			/*
7256 			 * Failed, throw away what we've done so
7257 			 * far, and discard the rest of the packet.
7258 			 */
7259 			ifp->if_ierrors++;
7260 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7261 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7262 			wm_init_rxdesc(rxq, i);
7263 			if ((status & WRX_ST_EOP) == 0)
7264 				rxq->rxq_discard = 1;
7265 			if (rxq->rxq_head != NULL)
7266 				m_freem(rxq->rxq_head);
7267 			WM_RXCHAIN_RESET(rxq);
7268 			DPRINTF(WM_DEBUG_RX,
7269 			    ("%s: RX: Rx buffer allocation failed, "
7270 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7271 			    rxq->rxq_discard ? " (discard)" : ""));
7272 			continue;
7273 		}
7274 
7275 		m->m_len = len;
7276 		rxq->rxq_len += len;
7277 		DPRINTF(WM_DEBUG_RX,
7278 		    ("%s: RX: buffer at %p len %d\n",
7279 		    device_xname(sc->sc_dev), m->m_data, len));
7280 
7281 		/* If this is not the end of the packet, keep looking. */
7282 		if ((status & WRX_ST_EOP) == 0) {
7283 			WM_RXCHAIN_LINK(rxq, m);
7284 			DPRINTF(WM_DEBUG_RX,
7285 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7286 			    device_xname(sc->sc_dev), rxq->rxq_len));
7287 			continue;
7288 		}
7289 
7290 		/*
7291 		 * Okay, we have the entire packet now.  The chip is
7292 		 * configured to include the FCS except on the I350 and
7293 		 * I21[01] (not all chips can be configured to strip it),
7294 		 * so we need to trim it.
7295 		 * We may need to adjust the length of the previous mbuf
7296 		 * in the chain if the current mbuf is too short.
7297 		 * Due to an erratum, the RCTL_SECRC bit in the RCTL register
7298 		 * is always set on the I350, so we don't trim there.
7299 		 */
7300 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7301 		    && (sc->sc_type != WM_T_I210)
7302 		    && (sc->sc_type != WM_T_I211)) {
7303 			if (m->m_len < ETHER_CRC_LEN) {
7304 				rxq->rxq_tail->m_len
7305 				    -= (ETHER_CRC_LEN - m->m_len);
7306 				m->m_len = 0;
7307 			} else
7308 				m->m_len -= ETHER_CRC_LEN;
7309 			len = rxq->rxq_len - ETHER_CRC_LEN;
7310 		} else
7311 			len = rxq->rxq_len;
7312 
7313 		WM_RXCHAIN_LINK(rxq, m);
7314 
7315 		*rxq->rxq_tailp = NULL;
7316 		m = rxq->rxq_head;
7317 
7318 		WM_RXCHAIN_RESET(rxq);
7319 
7320 		DPRINTF(WM_DEBUG_RX,
7321 		    ("%s: RX: have entire packet, len -> %d\n",
7322 		    device_xname(sc->sc_dev), len));
7323 
7324 		/* If an error occurred, update stats and drop the packet. */
7325 		if (errors &
7326 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7327 			if (errors & WRX_ER_SE)
7328 				log(LOG_WARNING, "%s: symbol error\n",
7329 				    device_xname(sc->sc_dev));
7330 			else if (errors & WRX_ER_SEQ)
7331 				log(LOG_WARNING, "%s: receive sequence error\n",
7332 				    device_xname(sc->sc_dev));
7333 			else if (errors & WRX_ER_CE)
7334 				log(LOG_WARNING, "%s: CRC error\n",
7335 				    device_xname(sc->sc_dev));
7336 			m_freem(m);
7337 			continue;
7338 		}
7339 
7340 		/* No errors.  Receive the packet. */
7341 		m_set_rcvif(m, ifp);
7342 		m->m_pkthdr.len = len;
7343 
7344 		/*
7345 		 * If VLANs are enabled, VLAN packets have been unwrapped
7346 		 * for us.  Associate the tag with the packet.
7347 		 */
7348 		/* XXX should check for i350 and i354 */
7349 		if ((status & WRX_ST_VP) != 0) {
7350 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7351 		}
7352 
7353 		/* Set up checksum info for this packet. */
7354 		if ((status & WRX_ST_IXSM) == 0) {
7355 			if (status & WRX_ST_IPCS) {
7356 				WM_Q_EVCNT_INCR(rxq, rxipsum);
7357 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7358 				if (errors & WRX_ER_IPE)
7359 					m->m_pkthdr.csum_flags |=
7360 					    M_CSUM_IPv4_BAD;
7361 			}
7362 			if (status & WRX_ST_TCPCS) {
7363 				/*
7364 				 * Note: we don't know if this was TCP or UDP,
7365 				 * so we just set both bits, and expect the
7366 				 * upper layers to deal.
7367 				 */
7368 				WM_Q_EVCNT_INCR(rxq, rxtusum);
7369 				m->m_pkthdr.csum_flags |=
7370 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7371 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7372 				if (errors & WRX_ER_TCPE)
7373 					m->m_pkthdr.csum_flags |=
7374 					    M_CSUM_TCP_UDP_BAD;
7375 			}
7376 		}
7377 
7378 		ifp->if_ipackets++;
7379 
7380 		mutex_exit(rxq->rxq_lock);
7381 
7382 		/* Pass this up to any BPF listeners. */
7383 		bpf_mtap(ifp, m);
7384 
7385 		/* Pass it on. */
7386 		if_percpuq_enqueue(sc->sc_ipq, m);
7387 
7388 		mutex_enter(rxq->rxq_lock);
7389 
7390 		if (sc->sc_stopping)
7391 			break;
7392 	}
7393 
7394 	/* Update the receive pointer. */
7395 	rxq->rxq_ptr = i;
7396 	if (count != 0)
7397 		rnd_add_uint32(&sc->rnd_source, count);
7398 
7399 	DPRINTF(WM_DEBUG_RX,
7400 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7401 }
7402 
7403 /*
7404  * wm_linkintr_gmii:
7405  *
7406  *	Helper; handle link interrupts for GMII.
7407  */
7408 static void
7409 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7410 {
7411 
7412 	KASSERT(WM_CORE_LOCKED(sc));
7413 
7414 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7415 		__func__));
7416 
7417 	if (icr & ICR_LSC) {
7418 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7419 
7420 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7421 			wm_gig_downshift_workaround_ich8lan(sc);
7422 
7423 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7424 			device_xname(sc->sc_dev)));
7425 		mii_pollstat(&sc->sc_mii);
7426 		if (sc->sc_type == WM_T_82543) {
7427 			int miistatus, active;
7428 
7429 			/*
7430 			 * With the 82543, we need to force the MAC's
7431 			 * speed and duplex to match the PHY's current
7432 			 * configuration.
7433 			 */
7434 			miistatus = sc->sc_mii.mii_media_status;
7435 
7436 			if (miistatus & IFM_ACTIVE) {
7437 				active = sc->sc_mii.mii_media_active;
7438 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7439 				switch (IFM_SUBTYPE(active)) {
7440 				case IFM_10_T:
7441 					sc->sc_ctrl |= CTRL_SPEED_10;
7442 					break;
7443 				case IFM_100_TX:
7444 					sc->sc_ctrl |= CTRL_SPEED_100;
7445 					break;
7446 				case IFM_1000_T:
7447 					sc->sc_ctrl |= CTRL_SPEED_1000;
7448 					break;
7449 				default:
7450 					/*
7451 					 * fiber?
7452 					 * Should not get here.
7453 					 */
7454 					printf("unknown media (%x)\n", active);
7455 					break;
7456 				}
7457 				if (active & IFM_FDX)
7458 					sc->sc_ctrl |= CTRL_FD;
7459 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7460 			}
7461 		} else if ((sc->sc_type == WM_T_ICH8)
7462 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7463 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7464 		} else if (sc->sc_type == WM_T_PCH) {
7465 			wm_k1_gig_workaround_hv(sc,
7466 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7467 		}
7468 
7469 		if ((sc->sc_phytype == WMPHY_82578)
7470 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7471 			== IFM_1000_T)) {
7472 
7473 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7474 				delay(200*1000); /* XXX too big */
7475 
7476 				/* Link stall fix for link up */
7477 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7478 				    HV_MUX_DATA_CTRL,
7479 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7480 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7481 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7482 				    HV_MUX_DATA_CTRL,
7483 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7484 			}
7485 		}
7486 	} else if (icr & ICR_RXSEQ) {
7487 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7488 			device_xname(sc->sc_dev)));
7489 	}
7490 }
7491 
7492 /*
7493  * wm_linkintr_tbi:
7494  *
7495  *	Helper; handle link interrupts for TBI mode.
7496  */
7497 static void
7498 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7499 {
7500 	uint32_t status;
7501 
7502 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7503 		__func__));
7504 
7505 	status = CSR_READ(sc, WMREG_STATUS);
7506 	if (icr & ICR_LSC) {
7507 		if (status & STATUS_LU) {
7508 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7509 			    device_xname(sc->sc_dev),
7510 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7511 			/*
7512 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7513 			 * so we should update sc->sc_ctrl
7514 			 */
7515 
7516 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7517 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7518 			sc->sc_fcrtl &= ~FCRTL_XONE;
7519 			if (status & STATUS_FD)
7520 				sc->sc_tctl |=
7521 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7522 			else
7523 				sc->sc_tctl |=
7524 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7525 			if (sc->sc_ctrl & CTRL_TFCE)
7526 				sc->sc_fcrtl |= FCRTL_XONE;
7527 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7528 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7529 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7530 				      sc->sc_fcrtl);
7531 			sc->sc_tbi_linkup = 1;
7532 		} else {
7533 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7534 			    device_xname(sc->sc_dev)));
7535 			sc->sc_tbi_linkup = 0;
7536 		}
7537 		/* Update LED */
7538 		wm_tbi_serdes_set_linkled(sc);
7539 	} else if (icr & ICR_RXSEQ) {
7540 		DPRINTF(WM_DEBUG_LINK,
7541 		    ("%s: LINK: Receive sequence error\n",
7542 		    device_xname(sc->sc_dev)));
7543 	}
7544 }
7545 
7546 /*
7547  * wm_linkintr_serdes:
7548  *
7549  *	Helper; handle link interrupts for SERDES mode.
7550  */
7551 static void
7552 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7553 {
7554 	struct mii_data *mii = &sc->sc_mii;
7555 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7556 	uint32_t pcs_adv, pcs_lpab, reg;
7557 
7558 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7559 		__func__));
7560 
7561 	if (icr & ICR_LSC) {
7562 		/* Check PCS */
7563 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7564 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7565 			mii->mii_media_status |= IFM_ACTIVE;
7566 			sc->sc_tbi_linkup = 1;
7567 		} else {
7568 			mii->mii_media_status |= IFM_NONE;
7569 			sc->sc_tbi_linkup = 0;
7570 			wm_tbi_serdes_set_linkled(sc);
7571 			return;
7572 		}
7573 		mii->mii_media_active |= IFM_1000_SX;
7574 		if ((reg & PCS_LSTS_FDX) != 0)
7575 			mii->mii_media_active |= IFM_FDX;
7576 		else
7577 			mii->mii_media_active |= IFM_HDX;
7578 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7579 			/* Check flow */
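			/*
			 * Resolve pause (flow control) from the advertised
			 * and link-partner ability bits, following the
			 * IEEE 802.3 annex 28B priority rules.
			 */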
7580 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7581 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7582 				DPRINTF(WM_DEBUG_LINK,
7583 				    ("XXX LINKOK but not ACOMP\n"));
7584 				return;
7585 			}
7586 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7587 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7588 			DPRINTF(WM_DEBUG_LINK,
7589 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7590 			if ((pcs_adv & TXCW_SYM_PAUSE)
7591 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7592 				mii->mii_media_active |= IFM_FLOW
7593 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7594 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7595 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7596 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7597 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7598 				mii->mii_media_active |= IFM_FLOW
7599 				    | IFM_ETH_TXPAUSE;
7600 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7601 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7602 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7603 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7604 				mii->mii_media_active |= IFM_FLOW
7605 				    | IFM_ETH_RXPAUSE;
7606 		}
7607 		/* Update LED */
7608 		wm_tbi_serdes_set_linkled(sc);
7609 	} else {
7610 		DPRINTF(WM_DEBUG_LINK,
7611 		    ("%s: LINK: Receive sequence error\n",
7612 		    device_xname(sc->sc_dev)));
7613 	}
7614 }
7615 
7616 /*
7617  * wm_linkintr:
7618  *
7619  *	Helper; handle link interrupts.
7620  */
7621 static void
7622 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7623 {
7624 
7625 	KASSERT(WM_CORE_LOCKED(sc));
7626 
7627 	if (sc->sc_flags & WM_F_HAS_MII)
7628 		wm_linkintr_gmii(sc, icr);
7629 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7630 	    && (sc->sc_type >= WM_T_82575))
7631 		wm_linkintr_serdes(sc, icr);
7632 	else
7633 		wm_linkintr_tbi(sc, icr);
7634 }
7635 
7636 /*
7637  * wm_intr_legacy:
7638  *
7639  *	Interrupt service routine for INTx and MSI.
7640  */
7641 static int
7642 wm_intr_legacy(void *arg)
7643 {
7644 	struct wm_softc *sc = arg;
7645 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7646 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7647 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7648 	uint32_t icr, rndval = 0;
7649 	int handled = 0;
7650 
7651 	DPRINTF(WM_DEBUG_TX,
7652 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7653 	while (1 /* CONSTCOND */) {
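		/*
		 * Reading ICR acknowledges (clears) the asserted causes;
		 * loop until none of the causes we enabled remain.
		 */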
7654 		icr = CSR_READ(sc, WMREG_ICR);
7655 		if ((icr & sc->sc_icr) == 0)
7656 			break;
7657 		if (rndval == 0)
7658 			rndval = icr;
7659 
7660 		mutex_enter(rxq->rxq_lock);
7661 
7662 		if (sc->sc_stopping) {
7663 			mutex_exit(rxq->rxq_lock);
7664 			break;
7665 		}
7666 
7667 		handled = 1;
7668 
7669 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7670 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7671 			DPRINTF(WM_DEBUG_RX,
7672 			    ("%s: RX: got Rx intr 0x%08x\n",
7673 			    device_xname(sc->sc_dev),
7674 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7675 			WM_Q_EVCNT_INCR(rxq, rxintr);
7676 		}
7677 #endif
7678 		wm_rxeof(rxq);
7679 
7680 		mutex_exit(rxq->rxq_lock);
7681 		mutex_enter(txq->txq_lock);
7682 
7683 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7684 		if (icr & ICR_TXDW) {
7685 			DPRINTF(WM_DEBUG_TX,
7686 			    ("%s: TX: got TXDW interrupt\n",
7687 			    device_xname(sc->sc_dev)));
7688 			WM_Q_EVCNT_INCR(txq, txdw);
7689 		}
7690 #endif
7691 		wm_txeof(sc, txq);
7692 
7693 		mutex_exit(txq->txq_lock);
7694 		WM_CORE_LOCK(sc);
7695 
7696 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7697 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7698 			wm_linkintr(sc, icr);
7699 		}
7700 
7701 		WM_CORE_UNLOCK(sc);
7702 
7703 		if (icr & ICR_RXO) {
7704 #if defined(WM_DEBUG)
7705 			log(LOG_WARNING, "%s: Receive overrun\n",
7706 			    device_xname(sc->sc_dev));
7707 #endif /* defined(WM_DEBUG) */
7708 		}
7709 	}
7710 
7711 	rnd_add_uint32(&sc->rnd_source, rndval);
7712 
7713 	if (handled) {
7714 		/* Try to get more packets going. */
7715 		ifp->if_start(ifp);
7716 	}
7717 
7718 	return handled;
7719 }
7720 
7721 static int
7722 wm_txrxintr_msix(void *arg)
7723 {
7724 	struct wm_queue *wmq = arg;
7725 	struct wm_txqueue *txq = &wmq->wmq_txq;
7726 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
7727 	struct wm_softc *sc = txq->txq_sc;
7728 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7729 
7730 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
7731 
7732 	DPRINTF(WM_DEBUG_TX,
7733 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
7734 
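	/*
	 * Mask this queue's interrupt while servicing it; the mask
	 * register and bit layout differ between chip generations.
	 */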
7735 	if (sc->sc_type == WM_T_82574)
7736 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7737 	else if (sc->sc_type == WM_T_82575)
7738 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7739 	else
7740 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
7741 
7742 	if (!sc->sc_stopping) {
7743 		mutex_enter(txq->txq_lock);
7744 
7745 		WM_Q_EVCNT_INCR(txq, txdw);
7746 		wm_txeof(sc, txq);
7747 
7748 		/* Try to get more packets going. */
7749 		if (pcq_peek(txq->txq_interq) != NULL)
7750 			wm_nq_transmit_locked(ifp, txq);
7751 		/*
7752 		 * There is still some upper-layer processing that calls
7753 		 * ifp->if_start(), e.g. ALTQ.
7754 		 */
7755 		if (wmq->wmq_id == 0) {
7756 			if (!IFQ_IS_EMPTY(&ifp->if_snd))
7757 				wm_nq_start_locked(ifp);
7758 		}
7759 		mutex_exit(txq->txq_lock);
7760 	}
7761 
7762 	DPRINTF(WM_DEBUG_RX,
7763 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
7764 
7765 	if (!sc->sc_stopping) {
7766 		mutex_enter(rxq->rxq_lock);
7767 		WM_Q_EVCNT_INCR(rxq, rxintr);
7768 		wm_rxeof(rxq);
7769 		mutex_exit(rxq->rxq_lock);
7770 	}
7771 
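	/* Done servicing; unmask this queue's interrupt again. */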
7772 	if (sc->sc_type == WM_T_82574)
7773 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
7774 	else if (sc->sc_type == WM_T_82575)
7775 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
7776 	else
7777 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
7778 
7779 	return 1;
7780 }
7781 
7782 /*
7783  * wm_linkintr_msix:
7784  *
7785  *	Interrupt service routine for link status change for MSI-X.
7786  */
7787 static int
7788 wm_linkintr_msix(void *arg)
7789 {
7790 	struct wm_softc *sc = arg;
7791 	uint32_t reg;
7792 
7793 	DPRINTF(WM_DEBUG_LINK,
7794 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
7795 
7796 	reg = CSR_READ(sc, WMREG_ICR);
7797 	WM_CORE_LOCK(sc);
7798 	if ((sc->sc_stopping) || ((reg & ICR_LSC) == 0))
7799 		goto out;
7800 
7801 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7802 	wm_linkintr(sc, ICR_LSC);
7803 
7804 out:
7805 	WM_CORE_UNLOCK(sc);
7806 
7807 	if (sc->sc_type == WM_T_82574)
7808 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
7809 	else if (sc->sc_type == WM_T_82575)
7810 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
7811 	else
7812 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
7813 
7814 	return 1;
7815 }
7816 
7817 /*
7818  * Media related.
7819  * GMII, SGMII, TBI (and SERDES)
7820  */
7821 
7822 /* Common */
7823 
7824 /*
7825  * wm_tbi_serdes_set_linkled:
7826  *
7827  *	Update the link LED on TBI and SERDES devices.
7828  */
7829 static void
7830 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
7831 {
7832 
7833 	if (sc->sc_tbi_linkup)
7834 		sc->sc_ctrl |= CTRL_SWDPIN(0);
7835 	else
7836 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
7837 
7838 	/* 82540 or newer devices are active low */
7839 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
7840 
7841 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7842 }
7843 
7844 /* GMII related */
7845 
7846 /*
7847  * wm_gmii_reset:
7848  *
7849  *	Reset the PHY.
7850  */
7851 static void
7852 wm_gmii_reset(struct wm_softc *sc)
7853 {
7854 	uint32_t reg;
7855 	int rv;
7856 
7857 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
7858 		device_xname(sc->sc_dev), __func__));
7859 
7860 	rv = sc->phy.acquire(sc);
7861 	if (rv != 0) {
7862 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
7863 		    __func__);
7864 		return;
7865 	}
7866 
7867 	switch (sc->sc_type) {
7868 	case WM_T_82542_2_0:
7869 	case WM_T_82542_2_1:
7870 		/* null */
7871 		break;
7872 	case WM_T_82543:
7873 		/*
7874 		 * With the 82543, we need to force the MAC's speed and
7875 		 * duplex to match the PHY's current configuration.
7876 		 * In addition, we need to perform a hardware reset on the PHY
7877 		 * to take it out of reset.
7878 		 */
7879 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
7880 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7881 
7882 		/* The PHY reset pin is active-low. */
7883 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
7884 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
7885 		    CTRL_EXT_SWDPIN(4));
7886 		reg |= CTRL_EXT_SWDPIO(4);
7887 
7888 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
7889 		CSR_WRITE_FLUSH(sc);
7890 		delay(10*1000);
7891 
7892 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
7893 		CSR_WRITE_FLUSH(sc);
7894 		delay(150);
7895 #if 0
7896 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
7897 #endif
7898 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
7899 		break;
7900 	case WM_T_82544:	/* reset 10000us */
7901 	case WM_T_82540:
7902 	case WM_T_82545:
7903 	case WM_T_82545_3:
7904 	case WM_T_82546:
7905 	case WM_T_82546_3:
7906 	case WM_T_82541:
7907 	case WM_T_82541_2:
7908 	case WM_T_82547:
7909 	case WM_T_82547_2:
7910 	case WM_T_82571:	/* reset 100us */
7911 	case WM_T_82572:
7912 	case WM_T_82573:
7913 	case WM_T_82574:
7914 	case WM_T_82575:
7915 	case WM_T_82576:
7916 	case WM_T_82580:
7917 	case WM_T_I350:
7918 	case WM_T_I354:
7919 	case WM_T_I210:
7920 	case WM_T_I211:
7921 	case WM_T_82583:
7922 	case WM_T_80003:
7923 		/* generic reset */
7924 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7925 		CSR_WRITE_FLUSH(sc);
7926 		delay(20000);
7927 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7928 		CSR_WRITE_FLUSH(sc);
7929 		delay(20000);
7930 
7931 		if ((sc->sc_type == WM_T_82541)
7932 		    || (sc->sc_type == WM_T_82541_2)
7933 		    || (sc->sc_type == WM_T_82547)
7934 		    || (sc->sc_type == WM_T_82547_2)) {
7935 			/* Workarounds for IGP are done in igp_reset() */
7936 			/* XXX add code to set LED after phy reset */
7937 		}
7938 		break;
7939 	case WM_T_ICH8:
7940 	case WM_T_ICH9:
7941 	case WM_T_ICH10:
7942 	case WM_T_PCH:
7943 	case WM_T_PCH2:
7944 	case WM_T_PCH_LPT:
7945 	case WM_T_PCH_SPT:
7946 		/* generic reset */
7947 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
7948 		CSR_WRITE_FLUSH(sc);
7949 		delay(100);
7950 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7951 		CSR_WRITE_FLUSH(sc);
7952 		delay(150);
7953 		break;
7954 	default:
7955 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
7956 		    __func__);
7957 		break;
7958 	}
7959 
7960 	sc->phy.release(sc);
7961 
7962 	/* get_cfg_done */
7963 	wm_get_cfg_done(sc);
7964 
7965 	/* extra setup */
7966 	switch (sc->sc_type) {
7967 	case WM_T_82542_2_0:
7968 	case WM_T_82542_2_1:
7969 	case WM_T_82543:
7970 	case WM_T_82544:
7971 	case WM_T_82540:
7972 	case WM_T_82545:
7973 	case WM_T_82545_3:
7974 	case WM_T_82546:
7975 	case WM_T_82546_3:
7976 	case WM_T_82541_2:
7977 	case WM_T_82547_2:
7978 	case WM_T_82571:
7979 	case WM_T_82572:
7980 	case WM_T_82573:
7981 	case WM_T_82575:
7982 	case WM_T_82576:
7983 	case WM_T_82580:
7984 	case WM_T_I350:
7985 	case WM_T_I354:
7986 	case WM_T_I210:
7987 	case WM_T_I211:
7988 	case WM_T_80003:
7989 		/* null */
7990 		break;
7991 	case WM_T_82574:
7992 	case WM_T_82583:
7993 		wm_lplu_d0_disable(sc);
7994 		break;
7995 	case WM_T_82541:
7996 	case WM_T_82547:
7997 		/* XXX Configure activity LED after PHY reset */
7998 		break;
7999 	case WM_T_ICH8:
8000 	case WM_T_ICH9:
8001 	case WM_T_ICH10:
8002 	case WM_T_PCH:
8003 	case WM_T_PCH2:
8004 	case WM_T_PCH_LPT:
8005 	case WM_T_PCH_SPT:
8006 		/* Allow time for h/w to reach a quiescent state after reset */
8007 		delay(10*1000);
8008 
8009 		if (sc->sc_type == WM_T_PCH)
8010 			wm_hv_phy_workaround_ich8lan(sc);
8011 
8012 		if (sc->sc_type == WM_T_PCH2)
8013 			wm_lv_phy_workaround_ich8lan(sc);
8014 
8015 		if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)) {
8016 			/*
8017 			 * Dummy read to clear the PHY wakeup bit after LCD
8018 			 * reset
8019 			 */
8020 			reg = wm_gmii_hv_readreg(sc->sc_dev, 1, BM_WUC);
8021 		}
8022 
8023 		/*
8024 		 * XXX Configure the LCD with the extended configuration region
8025 		 * in NVM
8026 		 */
8027 
8028 		/* Disable D0 LPLU. */
8029 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
8030 			wm_lplu_d0_disable_pch(sc);
8031 		else
8032 			wm_lplu_d0_disable(sc);	/* ICH* */
8033 		break;
8034 	default:
8035 		panic("%s: unknown type\n", __func__);
8036 		break;
8037 	}
8038 }
8039 
8040 /*
8041  * wm_get_phy_id_82575:
8042  *
8043  * Return PHY ID. Return -1 if it failed.
8044  */
8045 static int
8046 wm_get_phy_id_82575(struct wm_softc *sc)
8047 {
8048 	uint32_t reg;
8049 	int phyid = -1;
8050 
8051 	/* XXX */
8052 	if ((sc->sc_flags & WM_F_SGMII) == 0)
8053 		return -1;
8054 
8055 	if (wm_sgmii_uses_mdio(sc)) {
8056 		switch (sc->sc_type) {
8057 		case WM_T_82575:
8058 		case WM_T_82576:
8059 			reg = CSR_READ(sc, WMREG_MDIC);
8060 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8061 			break;
8062 		case WM_T_82580:
8063 		case WM_T_I350:
8064 		case WM_T_I354:
8065 		case WM_T_I210:
8066 		case WM_T_I211:
8067 			reg = CSR_READ(sc, WMREG_MDICNFG);
8068 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8069 			break;
8070 		default:
8071 			return -1;
8072 		}
8073 	}
8074 
8075 	return phyid;
8076 }
8077 
8078 
8079 /*
8080  * wm_gmii_mediainit:
8081  *
8082  *	Initialize media for use on 1000BASE-T devices.
8083  */
8084 static void
8085 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8086 {
8087 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8088 	struct mii_data *mii = &sc->sc_mii;
8089 	uint32_t reg;
8090 
8091 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8092 		device_xname(sc->sc_dev), __func__));
8093 
8094 	/* We have GMII. */
8095 	sc->sc_flags |= WM_F_HAS_MII;
8096 
8097 	if (sc->sc_type == WM_T_80003)
8098 		sc->sc_tipg = TIPG_1000T_80003_DFLT;
8099 	else
8100 		sc->sc_tipg = TIPG_1000T_DFLT;
8101 
8102 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8103 	if ((sc->sc_type == WM_T_82580)
8104 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8105 	    || (sc->sc_type == WM_T_I211)) {
8106 		reg = CSR_READ(sc, WMREG_PHPM);
8107 		reg &= ~PHPM_GO_LINK_D;
8108 		CSR_WRITE(sc, WMREG_PHPM, reg);
8109 	}
8110 
8111 	/*
8112 	 * Let the chip set speed/duplex on its own based on
8113 	 * signals from the PHY.
8114 	 * XXXbouyer - I'm not sure this is right for the 80003,
8115 	 * the em driver only sets CTRL_SLU here - but it seems to work.
8116 	 */
8117 	sc->sc_ctrl |= CTRL_SLU;
8118 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8119 
8120 	/* Initialize our media structures and probe the GMII. */
8121 	mii->mii_ifp = ifp;
8122 
8123 	/*
8124 	 * Determine the PHY access method.
8125 	 *
8126 	 *  For SGMII, use SGMII specific method.
8127 	 *
8128 	 *  For some devices, we can determine the PHY access method
8129 	 * from sc_type.
8130 	 *
8131 	 *  For ICH and PCH variants, it's difficult to determine the PHY
8132 	 * access method by sc_type alone, so use the PCI product ID for
8133 	 * some devices.
8134 	 * For other ICH8 variants, try the igp method first; if the PHY
8135 	 * can't be detected, fall back to the bm method.
8136 	 */
8137 	switch (prodid) {
8138 	case PCI_PRODUCT_INTEL_PCH_M_LM:
8139 	case PCI_PRODUCT_INTEL_PCH_M_LC:
8140 		/* 82577 */
8141 		sc->sc_phytype = WMPHY_82577;
8142 		break;
8143 	case PCI_PRODUCT_INTEL_PCH_D_DM:
8144 	case PCI_PRODUCT_INTEL_PCH_D_DC:
8145 		/* 82578 */
8146 		sc->sc_phytype = WMPHY_82578;
8147 		break;
8148 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8149 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
8150 		/* 82579 */
8151 		sc->sc_phytype = WMPHY_82579;
8152 		break;
8153 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
8154 	case PCI_PRODUCT_INTEL_82801I_BM:
8155 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8156 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8157 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8158 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8159 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8160 		/* ICH8, 9, 10 with 82567 */
8161 		sc->sc_phytype = WMPHY_BM;
8162 		mii->mii_readreg = wm_gmii_bm_readreg;
8163 		mii->mii_writereg = wm_gmii_bm_writereg;
8164 		break;
8165 	default:
8166 		if (((sc->sc_flags & WM_F_SGMII) != 0)
8167 		    && !wm_sgmii_uses_mdio(sc)) {
8168 			/* SGMII */
8169 			mii->mii_readreg = wm_sgmii_readreg;
8170 			mii->mii_writereg = wm_sgmii_writereg;
8171 		} else if (sc->sc_type >= WM_T_ICH8) {
8172 			/* non-82567 ICH8, 9 and 10 */
8173 			mii->mii_readreg = wm_gmii_i82544_readreg;
8174 			mii->mii_writereg = wm_gmii_i82544_writereg;
8175 		} else if (sc->sc_type >= WM_T_80003) {
8176 			/* 80003 */
8177 			mii->mii_readreg = wm_gmii_i80003_readreg;
8178 			mii->mii_writereg = wm_gmii_i80003_writereg;
8179 		} else if (sc->sc_type >= WM_T_I210) {
8180 			/* I210 and I211 */
8181 			mii->mii_readreg = wm_gmii_gs40g_readreg;
8182 			mii->mii_writereg = wm_gmii_gs40g_writereg;
8183 		} else if (sc->sc_type >= WM_T_82580) {
8184 			/* 82580, I350 and I354 */
8185 			sc->sc_phytype = WMPHY_82580;
8186 			mii->mii_readreg = wm_gmii_82580_readreg;
8187 			mii->mii_writereg = wm_gmii_82580_writereg;
8188 		} else if (sc->sc_type >= WM_T_82544) {
8189 			/* 82544, 82540, 8254[56], 8254[17], 8257[1-4], 82583 */
8190 			mii->mii_readreg = wm_gmii_i82544_readreg;
8191 			mii->mii_writereg = wm_gmii_i82544_writereg;
8192 		} else {
8193 			mii->mii_readreg = wm_gmii_i82543_readreg;
8194 			mii->mii_writereg = wm_gmii_i82543_writereg;
8195 		}
8196 		break;
8197 	}
8198 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8199 		/* All PCH* use _hv_ */
8200 		mii->mii_readreg = wm_gmii_hv_readreg;
8201 		mii->mii_writereg = wm_gmii_hv_writereg;
8202 	}
8203 	mii->mii_statchg = wm_gmii_statchg;
8204 
8205 	wm_gmii_reset(sc);
8206 
8207 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8208 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8209 	    wm_gmii_mediastatus);
8210 
8211 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8212 	    || (sc->sc_type == WM_T_82580)
8213 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8214 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8215 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8216 			/* Attach only one port */
8217 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8218 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8219 		} else {
8220 			int i, id;
8221 			uint32_t ctrl_ext;
8222 
8223 			id = wm_get_phy_id_82575(sc);
8224 			if (id != -1) {
8225 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8226 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8227 			}
8228 			if ((id == -1)
8229 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8230 				/* Power on sgmii phy if it is disabled */
8231 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8232 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8233 				    ctrl_ext & ~CTRL_EXT_SWDPIN(3));
8234 				CSR_WRITE_FLUSH(sc);
8235 				delay(300*1000); /* XXX too long */
8236 
8237 				/* from 1 to 8 */
8238 				/* Try PHY addresses 1 through 7 */
8239 					mii_attach(sc->sc_dev, &sc->sc_mii,
8240 					    0xffffffff, i, MII_OFFSET_ANY,
8241 					    MIIF_DOPAUSE);
8242 
8243 				/* Restore the previous SFP cage power state */
8244 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8245 			}
8246 		}
8247 	} else {
8248 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8249 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8250 	}
8251 
8252 	/*
8253 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8254 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8255 	 */
8256 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8257 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8258 		wm_set_mdio_slow_mode_hv(sc);
8259 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8260 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8261 	}
8262 
8263 	/*
8264 	 * (For ICH8 variants)
8265 	 * If PHY detection failed, use BM's r/w function and retry.
8266 	 */
8267 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8268 		/* if failed, retry with *_bm_* */
8269 		mii->mii_readreg = wm_gmii_bm_readreg;
8270 		mii->mii_writereg = wm_gmii_bm_writereg;
8271 
8272 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8273 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8274 	}
8275 
8276 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8277 		/* No PHY was found */
8278 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8279 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8280 		sc->sc_phytype = WMPHY_NONE;
8281 	} else {
8282 		/*
8283 		 * PHY Found!
8284 		 * Check PHY type.
8285 		 */
8286 		uint32_t model;
8287 		struct mii_softc *child;
8288 
8289 		child = LIST_FIRST(&mii->mii_phys);
8290 		model = child->mii_mpd_model;
8291 		if (model == MII_MODEL_yyINTEL_I82566)
8292 			sc->sc_phytype = WMPHY_IGP_3;
8293 
8294 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8295 	}
8296 }
8297 
8298 /*
8299  * wm_gmii_mediachange:	[ifmedia interface function]
8300  *
8301  *	Set hardware to newly-selected media on a 1000BASE-T device.
8302  */
8303 static int
8304 wm_gmii_mediachange(struct ifnet *ifp)
8305 {
8306 	struct wm_softc *sc = ifp->if_softc;
8307 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8308 	int rc;
8309 
8310 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8311 		device_xname(sc->sc_dev), __func__));
8312 	if ((ifp->if_flags & IFF_UP) == 0)
8313 		return 0;
8314 
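	/*
	 * For autoselect, or on chips newer than the 82543, clear the
	 * force-speed/duplex bits and let the MAC follow the PHY;
	 * otherwise force the MAC to the selected media.
	 */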
8315 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8316 	sc->sc_ctrl |= CTRL_SLU;
8317 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8318 	    || (sc->sc_type > WM_T_82543)) {
8319 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8320 	} else {
8321 		sc->sc_ctrl &= ~CTRL_ASDE;
8322 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8323 		if (ife->ifm_media & IFM_FDX)
8324 			sc->sc_ctrl |= CTRL_FD;
8325 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8326 		case IFM_10_T:
8327 			sc->sc_ctrl |= CTRL_SPEED_10;
8328 			break;
8329 		case IFM_100_TX:
8330 			sc->sc_ctrl |= CTRL_SPEED_100;
8331 			break;
8332 		case IFM_1000_T:
8333 			sc->sc_ctrl |= CTRL_SPEED_1000;
8334 			break;
8335 		default:
8336 			panic("wm_gmii_mediachange: bad media 0x%x",
8337 			    ife->ifm_media);
8338 		}
8339 	}
8340 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8341 	if (sc->sc_type <= WM_T_82543)
8342 		wm_gmii_reset(sc);
8343 
8344 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8345 		return 0;
8346 	return rc;
8347 }
8348 
8349 /*
8350  * wm_gmii_mediastatus:	[ifmedia interface function]
8351  *
8352  *	Get the current interface media status on a 1000BASE-T device.
8353  */
8354 static void
8355 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8356 {
8357 	struct wm_softc *sc = ifp->if_softc;
8358 
8359 	ether_mediastatus(ifp, ifmr);
8360 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8361 	    | sc->sc_flowflags;
8362 }
8363 
8364 #define	MDI_IO		CTRL_SWDPIN(2)
8365 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8366 #define	MDI_CLK		CTRL_SWDPIN(3)
8367 
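/*
 * On the 82543 the MII is accessed by bit-banging the software-definable
 * pins in the CTRL register: MDI_IO carries the data and MDI_CLK the
 * clock.
 */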
8368 static void
8369 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8370 {
8371 	uint32_t i, v;
8372 
8373 	v = CSR_READ(sc, WMREG_CTRL);
8374 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8375 	v |= MDI_DIR | CTRL_SWDPIO(3);
8376 
8377 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8378 		if (data & i)
8379 			v |= MDI_IO;
8380 		else
8381 			v &= ~MDI_IO;
8382 		CSR_WRITE(sc, WMREG_CTRL, v);
8383 		CSR_WRITE_FLUSH(sc);
8384 		delay(10);
8385 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8386 		CSR_WRITE_FLUSH(sc);
8387 		delay(10);
8388 		CSR_WRITE(sc, WMREG_CTRL, v);
8389 		CSR_WRITE_FLUSH(sc);
8390 		delay(10);
8391 	}
8392 }
8393 
8394 static uint32_t
8395 wm_i82543_mii_recvbits(struct wm_softc *sc)
8396 {
8397 	uint32_t v, i, data = 0;
8398 
8399 	v = CSR_READ(sc, WMREG_CTRL);
8400 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8401 	v |= CTRL_SWDPIO(3);
8402 
8403 	CSR_WRITE(sc, WMREG_CTRL, v);
8404 	CSR_WRITE_FLUSH(sc);
8405 	delay(10);
8406 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8407 	CSR_WRITE_FLUSH(sc);
8408 	delay(10);
8409 	CSR_WRITE(sc, WMREG_CTRL, v);
8410 	CSR_WRITE_FLUSH(sc);
8411 	delay(10);
8412 
8413 	for (i = 0; i < 16; i++) {
8414 		data <<= 1;
8415 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8416 		CSR_WRITE_FLUSH(sc);
8417 		delay(10);
8418 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8419 			data |= 1;
8420 		CSR_WRITE(sc, WMREG_CTRL, v);
8421 		CSR_WRITE_FLUSH(sc);
8422 		delay(10);
8423 	}
8424 
8425 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8426 	CSR_WRITE_FLUSH(sc);
8427 	delay(10);
8428 	CSR_WRITE(sc, WMREG_CTRL, v);
8429 	CSR_WRITE_FLUSH(sc);
8430 	delay(10);
8431 
8432 	return data;
8433 }
8434 
8435 #undef MDI_IO
8436 #undef MDI_DIR
8437 #undef MDI_CLK
8438 
8439 /*
8440  * wm_gmii_i82543_readreg:	[mii interface function]
8441  *
8442  *	Read a PHY register on the GMII (i82543 version).
8443  */
8444 static int
8445 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8446 {
8447 	struct wm_softc *sc = device_private(self);
8448 	int rv;
8449 
8450 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8451 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8452 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8453 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8454 
8455 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8456 	    device_xname(sc->sc_dev), phy, reg, rv));
8457 
8458 	return rv;
8459 }
8460 
8461 /*
8462  * wm_gmii_i82543_writereg:	[mii interface function]
8463  *
8464  *	Write a PHY register on the GMII (i82543 version).
8465  */
8466 static void
8467 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8468 {
8469 	struct wm_softc *sc = device_private(self);
8470 
8471 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8472 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8473 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8474 	    (MII_COMMAND_START << 30), 32);
8475 }
8476 
8477 /*
8478  * wm_gmii_mdic_readreg:	[mii interface function]
8479  *
8480  *	Read a PHY register on the GMII.
8481  */
8482 static int
8483 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
8484 {
8485 	struct wm_softc *sc = device_private(self);
8486 	uint32_t mdic = 0;
8487 	int i, rv;
8488 
8489 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8490 	    MDIC_REGADD(reg));
8491 
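	/* Poll for the ready bit; each iteration waits 50us. */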
8492 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8493 		mdic = CSR_READ(sc, WMREG_MDIC);
8494 		if (mdic & MDIC_READY)
8495 			break;
8496 		delay(50);
8497 	}
8498 
8499 	if ((mdic & MDIC_READY) == 0) {
8500 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8501 		    device_xname(sc->sc_dev), phy, reg);
8502 		rv = 0;
8503 	} else if (mdic & MDIC_E) {
8504 #if 0 /* This is normal if no PHY is present. */
8505 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8506 		    device_xname(sc->sc_dev), phy, reg);
8507 #endif
8508 		rv = 0;
8509 	} else {
8510 		rv = MDIC_DATA(mdic);
8511 		if (rv == 0xffff)
8512 			rv = 0;
8513 	}
8514 
8515 	return rv;
8516 }
8517 
8518 /*
8519  * wm_gmii_mdic_writereg:	[mii interface function]
8520  *
8521  *	Write a PHY register on the GMII.
8522  */
8523 static void
8524 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
8525 {
8526 	struct wm_softc *sc = device_private(self);
8527 	uint32_t mdic = 0;
8528 	int i;
8529 
8530 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8531 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8532 
8533 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8534 		mdic = CSR_READ(sc, WMREG_MDIC);
8535 		if (mdic & MDIC_READY)
8536 			break;
8537 		delay(50);
8538 	}
8539 
8540 	if ((mdic & MDIC_READY) == 0)
8541 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8542 		    device_xname(sc->sc_dev), phy, reg);
8543 	else if (mdic & MDIC_E)
8544 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8545 		    device_xname(sc->sc_dev), phy, reg);
8546 }
8547 
8548 /*
8549  * wm_gmii_i82544_readreg:	[mii interface function]
8550  *
8551  *	Read a PHY register on the GMII.
8552  */
8553 static int
8554 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8555 {
8556 	struct wm_softc *sc = device_private(self);
8557 	int rv;
8558 
8559 	if (sc->phy.acquire(sc)) {
8560 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8561 		    __func__);
8562 		return 0;
8563 	}
8564 	rv = wm_gmii_mdic_readreg(self, phy, reg);
8565 	sc->phy.release(sc);
8566 
8567 	return rv;
8568 }
8569 
8570 /*
8571  * wm_gmii_i82544_writereg:	[mii interface function]
8572  *
8573  *	Write a PHY register on the GMII.
8574  */
8575 static void
8576 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8577 {
8578 	struct wm_softc *sc = device_private(self);
8579 
8580 	if (sc->phy.acquire(sc)) {
8581 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8582 		    __func__);
		return;
8583 	}
8584 	wm_gmii_mdic_writereg(self, phy, reg, val);
8585 	sc->phy.release(sc);
8586 }
8587 
8588 /*
8589  * wm_gmii_i80003_readreg:	[mii interface function]
8590  *
8591  *	Read a PHY register on the Kumeran bus.
8592  * This could be handled by the PHY layer if we didn't have to lock the
8593  * resource ...
8594  */
8595 static int
8596 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8597 {
8598 	struct wm_softc *sc = device_private(self);
8599 	int rv;
8600 
8601 	if (phy != 1) /* only one PHY on kumeran bus */
8602 		return 0;
8603 
8604 	if (sc->phy.acquire(sc)) {
8605 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8606 		    __func__);
8607 		return 0;
8608 	}
8609 
8610 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8611 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8612 		    reg >> GG82563_PAGE_SHIFT);
8613 	} else {
8614 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8615 		    reg >> GG82563_PAGE_SHIFT);
8616 	}
8617 	/* Wait 200us more to work around a bug in the MDIC ready bit */
8618 	delay(200);
8619 	rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8620 	delay(200);
8621 	sc->phy.release(sc);
8622 
8623 	return rv;
8624 }
8625 
8626 /*
8627  * wm_gmii_i80003_writereg:	[mii interface function]
8628  *
8629  *	Write a PHY register on the Kumeran bus.
8630  * This could be handled by the PHY layer if we didn't have to lock the
8631  * resource ...
8632  */
8633 static void
8634 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8635 {
8636 	struct wm_softc *sc = device_private(self);
8637 
8638 	if (phy != 1) /* only one PHY on kumeran bus */
8639 		return;
8640 
8641 	if (sc->phy.acquire(sc)) {
8642 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8643 		    __func__);
8644 		return;
8645 	}
8646 
8647 	if ((reg & GG82563_MAX_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
8648 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8649 		    reg >> GG82563_PAGE_SHIFT);
8650 	} else {
8651 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8652 		    reg >> GG82563_PAGE_SHIFT);
8653 	}
8654 	/* Wait 200us more to work around a bug in the MDIC ready bit */
8655 	delay(200);
8656 	wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8657 	delay(200);
8658 
8659 	sc->phy.release(sc);
8660 }
8661 
8662 /*
8663  * wm_gmii_bm_readreg:	[mii interface function]
8664  *
8665  *	Read a PHY register on the BM PHY (ICH8/9/10 with 82567).
8666  * This could be handled by the PHY layer if we didn't have to lock the
8667  * resource ...
8668  */
8669 static int
8670 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8671 {
8672 	struct wm_softc *sc = device_private(self);
8673 	int rv;
8674 
8675 	if (sc->phy.acquire(sc)) {
8676 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8677 		    __func__);
8678 		return 0;
8679 	}
8680 
8681 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8682 		if (phy == 1)
8683 			wm_gmii_mdic_writereg(self, phy,
8684 			    MII_IGPHY_PAGE_SELECT, reg);
8685 		else
8686 			wm_gmii_mdic_writereg(self, phy,
8687 			    GG82563_PHY_PAGE_SELECT,
8688 			    reg >> GG82563_PAGE_SHIFT);
8689 	}
8690 
8691 	rv = wm_gmii_mdic_readreg(self, phy, reg & GG82563_MAX_REG_ADDRESS);
8692 	sc->phy.release(sc);
8693 	return rv;
8694 }
8695 
8696 /*
8697  * wm_gmii_bm_writereg:	[mii interface function]
8698  *
8699  *	Write a PHY register on the BM PHY (ICH8/9/10 with 82567).
8700  * This could be handled by the PHY layer if we didn't have to lock the
8701  * resource ...
8702  */
8703 static void
8704 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
8705 {
8706 	struct wm_softc *sc = device_private(self);
8707 
8708 	if (sc->phy.acquire(sc)) {
8709 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8710 		    __func__);
8711 		return;
8712 	}
8713 
8714 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8715 		if (phy == 1)
8716 			wm_gmii_mdic_writereg(self, phy,
8717 			    MII_IGPHY_PAGE_SELECT, reg);
8718 		else
8719 			wm_gmii_mdic_writereg(self, phy,
8720 			    GG82563_PHY_PAGE_SELECT,
8721 			    reg >> GG82563_PAGE_SHIFT);
8722 	}
8723 
8724 	wm_gmii_mdic_writereg(self, phy, reg & GG82563_MAX_REG_ADDRESS, val);
8725 	sc->phy.release(sc);
8726 }
8727 
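/*
 * wm_access_phy_wakeup_reg_bm:
 *
 *	Read or write a BM PHY wakeup register (page 800).  Access is
 *	enabled through the WUC enable register on page 769, and the
 *	previous state is restored afterwards.
 */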
8728 static void
8729 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
8730 {
8731 	struct wm_softc *sc = device_private(self);
8732 	uint16_t regnum = BM_PHY_REG_NUM(offset);
8733 	uint16_t wuce;
8734 
8735 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8736 		device_xname(sc->sc_dev), __func__));
8737 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
8738 	if (sc->sc_type == WM_T_PCH) {
8739 		/* XXX The e1000 driver does nothing here... why? */
8740 	}
8741 
8742 	/* Set page 769 */
8743 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8744 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8745 
8746 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
8747 
8748 	wuce &= ~BM_WUC_HOST_WU_BIT;
8749 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG,
8750 	    wuce | BM_WUC_ENABLE_BIT);
8751 
8752 	/* Select page 800 */
8753 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8754 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
8755 
8756 	/* Write page 800 */
8757 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
8758 
8759 	if (rd)
8760 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
8761 	else
8762 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
8763 
8764 	/* Set page 769 */
8765 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8766 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
8767 
8768 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
8769 }
8770 
8771 /*
8772  * wm_gmii_hv_readreg:	[mii interface function]
8773  *
8774  *	Read a PHY register on the HV (PCH and newer) PHY.
8775  * This could be handled by the PHY layer if we didn't have to lock the
8776  * resource ...
8777  */
8778 static int
8779 wm_gmii_hv_readreg(device_t self, int phy, int reg)
8780 {
8781 	struct wm_softc *sc = device_private(self);
8782 	int rv;
8783 
8784 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8785 		device_xname(sc->sc_dev), __func__));
8786 	if (sc->phy.acquire(sc)) {
8787 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8788 		    __func__);
8789 		return 0;
8790 	}
8791 
8792 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
8793 	sc->phy.release(sc);
8794 	return rv;
8795 }
8796 
8797 static int
8798 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
8799 {
8800 	struct wm_softc *sc = device_private(self);
8801 	uint16_t page = BM_PHY_REG_PAGE(reg);
8802 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8803 	uint16_t val;
8804 	int rv;
8805 
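	/*
	 * BM-style register numbers encode both the page (upper bits) and
	 * the register offset (lower bits); they were split apart above.
	 */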
8806 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8807 	if (sc->sc_phytype == WMPHY_82577) {
8808 		/* XXX must write */
8809 	}
8810 
8811 	/* Page 800 works differently than the rest so it has its own func */
8812 	if (page == BM_WUC_PAGE) {
8813 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8814 		return val;
8815 	}
8816 
8817 	/*
8818 	 * Lower than page 768 works differently than the rest so it has its
8819 	 * own func
8820 	 */
8821 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8822 		printf("gmii_hv_readreg!!!\n");
8823 		return 0;
8824 	}
8825 
8826 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8827 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8828 		    page << BME1000_PAGE_SHIFT);
8829 	}
8830 
8831 	rv = wm_gmii_mdic_readreg(self, phy, regnum & IGPHY_MAXREGADDR);
8832 	return rv;
8833 }
8834 
8835 /*
8836  * wm_gmii_hv_writereg:	[mii interface function]
8837  *
8838  *	Write a PHY register on the HV (PCH) PHY.
8839  * This could be handled by the PHY layer if we didn't have to lock the
8840  * resource ...
8841  */
8842 static void
8843 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
8844 {
8845 	struct wm_softc *sc = device_private(self);
8846 
8847 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
8848 		device_xname(sc->sc_dev), __func__));
8849 
8850 	if (sc->phy.acquire(sc)) {
8851 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8852 		    __func__);
8853 		return;
8854 	}
8855 
8856 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
8857 	sc->phy.release(sc);
8858 }
8859 
8860 static void
8861 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
8862 {
8863 	uint16_t page = BM_PHY_REG_PAGE(reg);
8864 	uint16_t regnum = BM_PHY_REG_NUM(reg);
8865 
8866 	/* XXX Workaround failure in MDIO access while cable is disconnected */
8867 
8868 	/* Page 800 works differently than the rest so it has its own func */
8869 	if (page == BM_WUC_PAGE) {
8870 		uint16_t tmp;
8871 
8872 		tmp = val;
8873 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
8874 		return;
8875 	}
8876 
8877 	/*
8878 	 * Lower than page 768 works differently than the rest so it has its
8879 	 * own func
8880 	 */
8881 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
8882 		printf("gmii_hv_writereg!!!\n");
8883 		return;
8884 	}
8885 
8886 	/*
8887 	 * XXX Workaround MDIO accesses being disabled after entering IEEE
8888 	 * Power Down (whenever bit 11 of the PHY control register is set)
8889 	 */
8890 
8891 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
8892 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
8893 		    page << BME1000_PAGE_SHIFT);
8894 	}
8895 
8896 	wm_gmii_mdic_writereg(self, phy, regnum & IGPHY_MAXREGADDR, val);
8897 }
8898 
8899 /*
8900  * wm_gmii_82580_readreg:	[mii interface function]
8901  *
8902  *	Read a PHY register on the 82580 and I350.
8903  * This could be handled by the PHY layer if we didn't have to lock the
8904  * resource ...
8905  */
8906 static int
8907 wm_gmii_82580_readreg(device_t self, int phy, int reg)
8908 {
8909 	struct wm_softc *sc = device_private(self);
8910 	int rv;
8911 
8912 	if (sc->phy.acquire(sc) != 0) {
8913 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8914 		    __func__);
8915 		return 0;
8916 	}
8917 
8918 	rv = wm_gmii_mdic_readreg(self, phy, reg);
8919 
8920 	sc->phy.release(sc);
8921 	return rv;
8922 }
8923 
8924 /*
8925  * wm_gmii_82580_writereg:	[mii interface function]
8926  *
8927  *	Write a PHY register on the 82580 and I350.
8928  * This could be handled by the PHY layer if we didn't have to lock the
8929  * resource ...
8930  */
8931 static void
8932 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
8933 {
8934 	struct wm_softc *sc = device_private(self);
8935 
8936 	if (sc->phy.acquire(sc) != 0) {
8937 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8938 		    __func__);
8939 		return;
8940 	}
8941 
8942 	wm_gmii_mdic_writereg(self, phy, reg, val);
8943 
8944 	sc->phy.release(sc);
8945 }
8946 
8947 /*
8948  * wm_gmii_gs40g_readreg:	[mii interface function]
8949  *
8950  *	Read a PHY register on the I210 and I211.
8951  * This could be handled by the PHY layer if we didn't have to lock the
8952  * resource ...
8953  */
8954 static int
8955 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
8956 {
8957 	struct wm_softc *sc = device_private(self);
8958 	int page, offset;
8959 	int rv;
8960 
8961 	/* Acquire semaphore */
8962 	if (sc->phy.acquire(sc)) {
8963 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8964 		    __func__);
8965 		return 0;
8966 	}
8967 
8968 	/* Page select */
8969 	page = reg >> GS40G_PAGE_SHIFT;
8970 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
8971 
8972 	/* Read reg */
8973 	offset = reg & GS40G_OFFSET_MASK;
8974 	rv = wm_gmii_mdic_readreg(self, phy, offset);
8975 
8976 	sc->phy.release(sc);
8977 	return rv;
8978 }
8979 
8980 /*
8981  * wm_gmii_gs40g_writereg:	[mii interface function]
8982  *
8983  *	Write a PHY register on the I210 and I211.
8984  * This could be handled by the PHY layer if we didn't have to lock the
8985  * resource ...
8986  */
8987 static void
8988 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
8989 {
8990 	struct wm_softc *sc = device_private(self);
8991 	int page, offset;
8992 
8993 	/* Acquire semaphore */
8994 	if (sc->phy.acquire(sc)) {
8995 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8996 		    __func__);
8997 		return;
8998 	}
8999 
9000 	/* Page select */
9001 	page = reg >> GS40G_PAGE_SHIFT;
9002 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9003 
9004 	/* Write reg */
9005 	offset = reg & GS40G_OFFSET_MASK;
9006 	wm_gmii_mdic_writereg(self, phy, offset, val);
9007 
9008 	/* Release semaphore */
9009 	sc->phy.release(sc);
9010 }
9011 
9012 /*
9013  * wm_gmii_statchg:	[mii interface function]
9014  *
9015  *	Callback from MII layer when media changes.
9016  */
9017 static void
9018 wm_gmii_statchg(struct ifnet *ifp)
9019 {
9020 	struct wm_softc *sc = ifp->if_softc;
9021 	struct mii_data *mii = &sc->sc_mii;
9022 
9023 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9024 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9025 	sc->sc_fcrtl &= ~FCRTL_XONE;
9026 
9027 	/*
9028 	 * Get flow control negotiation result.
9029 	 */
9030 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9031 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9032 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9033 		mii->mii_media_active &= ~IFM_ETH_FMASK;
9034 	}
9035 
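	/*
	 * Program the MAC to match the negotiated pause configuration:
	 * TFCE enables transmitting PAUSE frames (XON as well when
	 * FCRTL_XONE is set) and RFCE enables acting on received PAUSE
	 * frames.
	 */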
9036 	if (sc->sc_flowflags & IFM_FLOW) {
9037 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9038 			sc->sc_ctrl |= CTRL_TFCE;
9039 			sc->sc_fcrtl |= FCRTL_XONE;
9040 		}
9041 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9042 			sc->sc_ctrl |= CTRL_RFCE;
9043 	}
9044 
9045 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
9046 		DPRINTF(WM_DEBUG_LINK,
9047 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9048 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9049 	} else {
9050 		DPRINTF(WM_DEBUG_LINK,
9051 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9052 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9053 	}
9054 
9055 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9056 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9057 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9058 						 : WMREG_FCRTL, sc->sc_fcrtl);
9059 	if (sc->sc_type == WM_T_80003) {
9060 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9061 		case IFM_1000_T:
9062 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9063 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9064 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
9065 			break;
9066 		default:
9067 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9068 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9069 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
9070 			break;
9071 		}
9072 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9073 	}
9074 }
9075 
9076 /*
9077  * wm_kmrn_readreg:
9078  *
9079  *	Read a kumeran register
9080  */
9081 static int
9082 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9083 {
9084 	int rv;
9085 
9086 	if (sc->sc_type == WM_T_80003)
9087 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9088 	else
9089 		rv = sc->phy.acquire(sc);
9090 	if (rv != 0) {
9091 		aprint_error_dev(sc->sc_dev,
9092 		    "%s: failed to get semaphore\n", __func__);
9093 		return 0;
9094 	}
9095 
9096 	rv = wm_kmrn_readreg_locked(sc, reg);
9097 
9098 	if (sc->sc_type == WM_T_80003)
9099 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9100 	else
9101 		sc->phy.release(sc);
9102 
9103 	return rv;
9104 }
9105 
9106 static int
9107 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9108 {
9109 	int rv;
9110 
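	/*
	 * KUMCTRLSTA multiplexes address and data: write the Kumeran
	 * register offset together with REN (read enable), give the
	 * hardware a moment to latch, then read the 16-bit result back
	 * from the low half of the same register.
	 */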
9111 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9112 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9113 	    KUMCTRLSTA_REN);
9114 	CSR_WRITE_FLUSH(sc);
9115 	delay(2);
9116 
9117 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9118 
9119 	return rv;
9120 }
9121 
9122 /*
9123  * wm_kmrn_writereg:
9124  *
9125  *	Write a kumeran register
9126  */
9127 static void
9128 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9129 {
9130 	int rv;
9131 
9132 	if (sc->sc_type == WM_T_80003)
9133 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9134 	else
9135 		rv = sc->phy.acquire(sc);
9136 	if (rv != 0) {
9137 		aprint_error_dev(sc->sc_dev,
9138 		    "%s: failed to get semaphore\n", __func__);
9139 		return;
9140 	}
9141 
9142 	wm_kmrn_writereg_locked(sc, reg, val);
9143 
9144 	if (sc->sc_type == WM_T_80003)
9145 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9146 	else
9147 		sc->phy.release(sc);
9148 }
9149 
9150 static void
9151 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9152 {
9153 
9154 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9155 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9156 	    (val & KUMCTRLSTA_MASK));
9157 }
9158 
9159 /* SGMII related */
9160 
9161 /*
9162  * wm_sgmii_uses_mdio
9163  *
9164  * Check whether the transaction is to the internal PHY or the external
9165  * MDIO interface. Return true if it's MDIO.
9166  */
9167 static bool
9168 wm_sgmii_uses_mdio(struct wm_softc *sc)
9169 {
9170 	uint32_t reg;
9171 	bool ismdio = false;
9172 
9173 	switch (sc->sc_type) {
9174 	case WM_T_82575:
9175 	case WM_T_82576:
9176 		reg = CSR_READ(sc, WMREG_MDIC);
9177 		ismdio = ((reg & MDIC_DEST) != 0);
9178 		break;
9179 	case WM_T_82580:
9180 	case WM_T_I350:
9181 	case WM_T_I354:
9182 	case WM_T_I210:
9183 	case WM_T_I211:
9184 		reg = CSR_READ(sc, WMREG_MDICNFG);
9185 		ismdio = ((reg & MDICNFG_DEST) != 0);
9186 		break;
9187 	default:
9188 		break;
9189 	}
9190 
9191 	return ismdio;
9192 }
9193 
9194 /*
9195  * wm_sgmii_readreg:	[mii interface function]
9196  *
9197  *	Read a PHY register on the SGMII
9198  * This could be handled by the PHY layer if we didn't have to lock the
9199  * resource ...
9200  */
9201 static int
9202 wm_sgmii_readreg(device_t self, int phy, int reg)
9203 {
9204 	struct wm_softc *sc = device_private(self);
9205 	uint32_t i2ccmd;
9206 	int i, rv;
9207 
9208 	if (sc->phy.acquire(sc)) {
9209 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9210 		    __func__);
9211 		return 0;
9212 	}
9213 
9214 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9215 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9216 	    | I2CCMD_OPCODE_READ;
9217 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9218 
9219 	/* Poll the ready bit */
9220 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9221 		delay(50);
9222 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9223 		if (i2ccmd & I2CCMD_READY)
9224 			break;
9225 	}
9226 	if ((i2ccmd & I2CCMD_READY) == 0)
9227 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9228 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9229 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9230 
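	/* Swap the data bytes returned by the I2C interface */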
9231 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9232 
9233 	sc->phy.release(sc);
9234 	return rv;
9235 }
9236 
9237 /*
9238  * wm_sgmii_writereg:	[mii interface function]
9239  *
9240  *	Write a PHY register on the SGMII.
9241  * This could be handled by the PHY layer if we didn't have to lock the
9242  * resource ...
9243  */
9244 static void
9245 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9246 {
9247 	struct wm_softc *sc = device_private(self);
9248 	uint32_t i2ccmd;
9249 	int i;
9250 	int val_swapped;
9251 
9252 	if (sc->phy.acquire(sc) != 0) {
9253 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9254 		    __func__);
9255 		return;
9256 	}
9257 	/* Swap the data bytes for the I2C interface */
9258 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9259 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9260 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9261 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9262 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9263 
9264 	/* Poll the ready bit */
9265 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9266 		delay(50);
9267 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9268 		if (i2ccmd & I2CCMD_READY)
9269 			break;
9270 	}
9271 	if ((i2ccmd & I2CCMD_READY) == 0)
9272 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9273 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9274 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9275 
9276 	sc->phy.release(sc);
9277 }
9278 
9279 /* TBI related */
9280 
9281 /*
9282  * wm_tbi_mediainit:
9283  *
9284  *	Initialize media for use on 1000BASE-X devices.
9285  */
9286 static void
9287 wm_tbi_mediainit(struct wm_softc *sc)
9288 {
9289 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9290 	const char *sep = "";
9291 
9292 	if (sc->sc_type < WM_T_82543)
9293 		sc->sc_tipg = TIPG_WM_DFLT;
9294 	else
9295 		sc->sc_tipg = TIPG_LG_DFLT;
9296 
9297 	sc->sc_tbi_serdes_anegticks = 5;
9298 
9299 	/* Initialize our media structures */
9300 	sc->sc_mii.mii_ifp = ifp;
9301 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9302 
9303 	if ((sc->sc_type >= WM_T_82575)
9304 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9305 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9306 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9307 	else
9308 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9309 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9310 
9311 	/*
9312 	 * SWD Pins:
9313 	 *
9314 	 *	0 = Link LED (output)
9315 	 *	1 = Loss Of Signal (input)
9316 	 */
9317 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9318 
9319 	/* XXX Perhaps this is only for TBI */
9320 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9321 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9322 
9323 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9324 		sc->sc_ctrl &= ~CTRL_LRST;
9325 
9326 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9327 
9328 #define	ADD(ss, mm, dd)							\
9329 do {									\
9330 	aprint_normal("%s%s", sep, ss);					\
9331 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9332 	sep = ", ";							\
9333 } while (/*CONSTCOND*/0)
9334 
9335 	aprint_normal_dev(sc->sc_dev, "");
9336 
9337 	/* Only 82545 is LX */
9338 	if (sc->sc_type == WM_T_82545) {
9339 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9340 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9341 	} else {
9342 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9343 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9344 	}
9345 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9346 	aprint_normal("\n");
9347 
9348 #undef ADD
9349 
9350 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9351 }
9352 
9353 /*
9354  * wm_tbi_mediachange:	[ifmedia interface function]
9355  *
9356  *	Set hardware to newly-selected media on a 1000BASE-X device.
9357  */
9358 static int
9359 wm_tbi_mediachange(struct ifnet *ifp)
9360 {
9361 	struct wm_softc *sc = ifp->if_softc;
9362 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9363 	uint32_t status;
9364 	int i;
9365 
9366 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9367 		/* XXX need some work for >= 82571 and < 82575 */
9368 		if (sc->sc_type < WM_T_82575)
9369 			return 0;
9370 	}
9371 
9372 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9373 	    || (sc->sc_type >= WM_T_82575))
9374 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9375 
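	/*
	 * Build the transmit config word: enable autonegotiation and
	 * advertise the duplex abilities implied by the selected media
	 * (IFM_AUTO advertises both half and full duplex).
	 */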
9376 	sc->sc_ctrl &= ~CTRL_LRST;
9377 	sc->sc_txcw = TXCW_ANE;
9378 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9379 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9380 	else if (ife->ifm_media & IFM_FDX)
9381 		sc->sc_txcw |= TXCW_FD;
9382 	else
9383 		sc->sc_txcw |= TXCW_HD;
9384 
9385 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9386 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9387 
9388 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9389 		    device_xname(sc->sc_dev), sc->sc_txcw));
9390 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9391 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9392 	CSR_WRITE_FLUSH(sc);
9393 	delay(1000);
9394 
9395 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9396 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9397 
9398 	/*
9399 	 * On chips newer than the 82544, CTRL_SWDPIN(1) is set when the
9400 	 * optics detect a signal; on older chips the sense is inverted.
9401 	 */
9402 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9403 		/* Have signal; wait for the link to come up. */
9404 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9405 			delay(10000);
9406 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9407 				break;
9408 		}
9409 
9410 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9411 			    device_xname(sc->sc_dev),i));
9412 
9413 		status = CSR_READ(sc, WMREG_STATUS);
9414 		DPRINTF(WM_DEBUG_LINK,
9415 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9416 			device_xname(sc->sc_dev),status, STATUS_LU));
9417 		if (status & STATUS_LU) {
9418 			/* Link is up. */
9419 			DPRINTF(WM_DEBUG_LINK,
9420 			    ("%s: LINK: set media -> link up %s\n",
9421 			    device_xname(sc->sc_dev),
9422 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9423 
9424 			/*
9425 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9426 			 * so we should update sc->sc_ctrl
9427 			 */
9428 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9429 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9430 			sc->sc_fcrtl &= ~FCRTL_XONE;
9431 			if (status & STATUS_FD)
9432 				sc->sc_tctl |=
9433 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9434 			else
9435 				sc->sc_tctl |=
9436 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9437 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9438 				sc->sc_fcrtl |= FCRTL_XONE;
9439 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9440 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9441 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9442 				      sc->sc_fcrtl);
9443 			sc->sc_tbi_linkup = 1;
9444 		} else {
9445 			if (i == WM_LINKUP_TIMEOUT)
9446 				wm_check_for_link(sc);
9447 			/* Link is down. */
9448 			DPRINTF(WM_DEBUG_LINK,
9449 			    ("%s: LINK: set media -> link down\n",
9450 			    device_xname(sc->sc_dev)));
9451 			sc->sc_tbi_linkup = 0;
9452 		}
9453 	} else {
9454 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9455 		    device_xname(sc->sc_dev)));
9456 		sc->sc_tbi_linkup = 0;
9457 	}
9458 
9459 	wm_tbi_serdes_set_linkled(sc);
9460 
9461 	return 0;
9462 }
9463 
9464 /*
9465  * wm_tbi_mediastatus:	[ifmedia interface function]
9466  *
9467  *	Get the current interface media status on a 1000BASE-X device.
9468  */
9469 static void
9470 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9471 {
9472 	struct wm_softc *sc = ifp->if_softc;
9473 	uint32_t ctrl, status;
9474 
9475 	ifmr->ifm_status = IFM_AVALID;
9476 	ifmr->ifm_active = IFM_ETHER;
9477 
9478 	status = CSR_READ(sc, WMREG_STATUS);
9479 	if ((status & STATUS_LU) == 0) {
9480 		ifmr->ifm_active |= IFM_NONE;
9481 		return;
9482 	}
9483 
9484 	ifmr->ifm_status |= IFM_ACTIVE;
9485 	/* Only 82545 is LX */
9486 	if (sc->sc_type == WM_T_82545)
9487 		ifmr->ifm_active |= IFM_1000_LX;
9488 	else
9489 		ifmr->ifm_active |= IFM_1000_SX;
9490 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9491 		ifmr->ifm_active |= IFM_FDX;
9492 	else
9493 		ifmr->ifm_active |= IFM_HDX;
9494 	ctrl = CSR_READ(sc, WMREG_CTRL);
9495 	if (ctrl & CTRL_RFCE)
9496 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9497 	if (ctrl & CTRL_TFCE)
9498 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9499 }
9500 
9501 /* XXX TBI only */
9502 static int
9503 wm_check_for_link(struct wm_softc *sc)
9504 {
9505 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9506 	uint32_t rxcw;
9507 	uint32_t ctrl;
9508 	uint32_t status;
9509 	uint32_t sig;
9510 
9511 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9512 		/* XXX need some work for >= 82571 */
9513 		if (sc->sc_type >= WM_T_82571) {
9514 			sc->sc_tbi_linkup = 1;
9515 			return 0;
9516 		}
9517 	}
9518 
9519 	rxcw = CSR_READ(sc, WMREG_RXCW);
9520 	ctrl = CSR_READ(sc, WMREG_CTRL);
9521 	status = CSR_READ(sc, WMREG_STATUS);
9522 
9523 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9524 
9525 	DPRINTF(WM_DEBUG_LINK,
9526 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9527 		device_xname(sc->sc_dev), __func__,
9528 		((ctrl & CTRL_SWDPIN(1)) == sig),
9529 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9530 
9531 	/*
9532 	 * SWDPIN   LU RXCW
9533 	 *      0    0    0
9534 	 *      0    0    1	(should not happen)
9535 	 *      0    1    0	(should not happen)
9536 	 *      0    1    1	(should not happen)
9537 	 *      1    0    0	Disable autonego and force linkup
9538 	 *      1    0    1	got /C/ but not linkup yet
9539 	 *      1    1    0	(linkup)
9540 	 *      1    1    1	If IFM_AUTO, back to autonego
9541 	 *
9542 	 */
9543 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9544 	    && ((status & STATUS_LU) == 0)
9545 	    && ((rxcw & RXCW_C) == 0)) {
9546 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9547 			__func__));
9548 		sc->sc_tbi_linkup = 0;
9549 		/* Disable auto-negotiation in the TXCW register */
9550 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9551 
9552 		/*
9553 		 * Force link-up and also force full-duplex.
9554 		 *
9555 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
9556 		 * so we should update sc->sc_ctrl
9557 		 */
9558 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9559 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9560 	} else if (((status & STATUS_LU) != 0)
9561 	    && ((rxcw & RXCW_C) != 0)
9562 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9563 		sc->sc_tbi_linkup = 1;
9564 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9565 			__func__));
9566 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9567 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9568 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9569 	    && ((rxcw & RXCW_C) != 0)) {
9570 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9571 	} else {
9572 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9573 			status));
9574 	}
9575 
9576 	return 0;
9577 }
9578 
9579 /*
9580  * wm_tbi_tick:
9581  *
9582  *	Check the link on TBI devices.
9583  *	This function acts as mii_tick().
9584  */
9585 static void
9586 wm_tbi_tick(struct wm_softc *sc)
9587 {
9588 	struct mii_data *mii = &sc->sc_mii;
9589 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9590 	uint32_t status;
9591 
9592 	KASSERT(WM_CORE_LOCKED(sc));
9593 
9594 	status = CSR_READ(sc, WMREG_STATUS);
9595 
9596 	/* XXX is this needed? */
9597 	(void)CSR_READ(sc, WMREG_RXCW);
9598 	(void)CSR_READ(sc, WMREG_CTRL);
9599 
9600 	/* set link status */
9601 	if ((status & STATUS_LU) == 0) {
9602 		DPRINTF(WM_DEBUG_LINK,
9603 		    ("%s: LINK: checklink -> down\n",
9604 			device_xname(sc->sc_dev)));
9605 		sc->sc_tbi_linkup = 0;
9606 	} else if (sc->sc_tbi_linkup == 0) {
9607 		DPRINTF(WM_DEBUG_LINK,
9608 		    ("%s: LINK: checklink -> up %s\n",
9609 			device_xname(sc->sc_dev),
9610 			(status & STATUS_FD) ? "FDX" : "HDX"));
9611 		sc->sc_tbi_linkup = 1;
9612 		sc->sc_tbi_serdes_ticks = 0;
9613 	}
9614 
9615 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9616 		goto setled;
9617 
9618 	if ((status & STATUS_LU) == 0) {
9619 		sc->sc_tbi_linkup = 0;
9620 		/* If the timer expired, retry autonegotiation */
9621 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9622 		    && (++sc->sc_tbi_serdes_ticks
9623 			>= sc->sc_tbi_serdes_anegticks)) {
9624 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9625 			sc->sc_tbi_serdes_ticks = 0;
9626 			/*
9627 			 * Reset the link, and let autonegotiation do
9628 			 * its thing
9629 			 */
9630 			sc->sc_ctrl |= CTRL_LRST;
9631 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9632 			CSR_WRITE_FLUSH(sc);
9633 			delay(1000);
9634 			sc->sc_ctrl &= ~CTRL_LRST;
9635 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9636 			CSR_WRITE_FLUSH(sc);
9637 			delay(1000);
9638 			CSR_WRITE(sc, WMREG_TXCW,
9639 			    sc->sc_txcw & ~TXCW_ANE);
9640 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9641 		}
9642 	}
9643 
9644 setled:
9645 	wm_tbi_serdes_set_linkled(sc);
9646 }
9647 
9648 /* SERDES related */
9649 static void
9650 wm_serdes_power_up_link_82575(struct wm_softc *sc)
9651 {
9652 	uint32_t reg;
9653 
9654 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9655 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
9656 		return;
9657 
9658 	reg = CSR_READ(sc, WMREG_PCS_CFG);
9659 	reg |= PCS_CFG_PCS_EN;
9660 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
9661 
9662 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
9663 	reg &= ~CTRL_EXT_SWDPIN(3);
9664 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9665 	CSR_WRITE_FLUSH(sc);
9666 }
9667 
9668 static int
9669 wm_serdes_mediachange(struct ifnet *ifp)
9670 {
9671 	struct wm_softc *sc = ifp->if_softc;
9672 	bool pcs_autoneg = true; /* XXX */
9673 	uint32_t ctrl_ext, pcs_lctl, reg;
9674 
9675 	/* XXX Currently, this function is not called on 8257[12] */
9676 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9677 	    || (sc->sc_type >= WM_T_82575))
9678 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9679 
9680 	wm_serdes_power_up_link_82575(sc);
9681 
9682 	sc->sc_ctrl |= CTRL_SLU;
9683 
9684 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
9685 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
9686 
9687 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9688 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
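	/*
	 * Choose between PCS autonegotiation and forced-up link based on
	 * the link mode strapped in CTRL_EXT: SGMII always autonegotiates
	 * and 1000BASE-KX never does; other modes force the MAC to
	 * 1000/full-duplex and keep PCS autonegotiation on unless it was
	 * disabled in the NVM (WM_F_PCS_DIS_AUTONEGO, 82575/82576 only).
	 */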
9689 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
9690 	case CTRL_EXT_LINK_MODE_SGMII:
9691 		pcs_autoneg = true;
9692 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
9693 		break;
9694 	case CTRL_EXT_LINK_MODE_1000KX:
9695 		pcs_autoneg = false;
9696 		/* FALLTHROUGH */
9697 	default:
9698 		if ((sc->sc_type == WM_T_82575)
9699 		    || (sc->sc_type == WM_T_82576)) {
9700 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
9701 				pcs_autoneg = false;
9702 		}
9703 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
9704 		    | CTRL_FRCFDX;
9705 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
9706 	}
9707 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9708 
9709 	if (pcs_autoneg) {
9710 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
9711 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
9712 
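		/*
		 * Advertise both symmetric and asymmetric PAUSE; the
		 * clear below is redundant since both bits are set right
		 * back, but it keeps the intent explicit.
		 */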
9713 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
9714 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
9715 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
9716 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
9717 	} else
9718 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
9719 
9720 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
9721 
9723 	return 0;
9724 }
9725 
9726 static void
9727 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9728 {
9729 	struct wm_softc *sc = ifp->if_softc;
9730 	struct mii_data *mii = &sc->sc_mii;
9731 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9732 	uint32_t pcs_adv, pcs_lpab, reg;
9733 
9734 	ifmr->ifm_status = IFM_AVALID;
9735 	ifmr->ifm_active = IFM_ETHER;
9736 
9737 	/* Check PCS */
9738 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9739 	if ((reg & PCS_LSTS_LINKOK) == 0) {
9740 		ifmr->ifm_active |= IFM_NONE;
9741 		sc->sc_tbi_linkup = 0;
9742 		goto setled;
9743 	}
9744 
9745 	sc->sc_tbi_linkup = 1;
9746 	ifmr->ifm_status |= IFM_ACTIVE;
9747 	ifmr->ifm_active |= IFM_1000_SX; /* XXX */
9748 	if ((reg & PCS_LSTS_FDX) != 0)
9749 		ifmr->ifm_active |= IFM_FDX;
9750 	else
9751 		ifmr->ifm_active |= IFM_HDX;
9752 	mii->mii_media_active &= ~IFM_ETH_FMASK;
9753 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
9754 		/* Check flow */
9755 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
9756 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
9757 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
9758 			goto setled;
9759 		}
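		/*
		 * Resolve flow control from our advertisement and the
		 * link partner's ability, per the IEEE 802.3 pause
		 * resolution rules: symmetric on both sides gives tx+rx
		 * pause, and the asymmetric combinations give tx-only or
		 * rx-only pause as decoded below.
		 */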
9760 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
9761 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
9762 		DPRINTF(WM_DEBUG_LINK,
9763 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
9764 		if ((pcs_adv & TXCW_SYM_PAUSE)
9765 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
9766 			mii->mii_media_active |= IFM_FLOW
9767 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
9768 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
9769 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9770 		    && (pcs_lpab & TXCW_SYM_PAUSE)
9771 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9772 			mii->mii_media_active |= IFM_FLOW
9773 			    | IFM_ETH_TXPAUSE;
9774 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
9775 		    && (pcs_adv & TXCW_ASYM_PAUSE)
9776 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
9777 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
9778 			mii->mii_media_active |= IFM_FLOW
9779 			    | IFM_ETH_RXPAUSE;
9780 		} else {
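			/* No compatible pause combination: no flow control */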
9781 		}
9782 	}
9783 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9784 	    | (mii->mii_media_active & IFM_ETH_FMASK);
9785 setled:
9786 	wm_tbi_serdes_set_linkled(sc);
9787 }
9788 
9789 /*
9790  * wm_serdes_tick:
9791  *
9792  *	Check the link on serdes devices.
9793  */
9794 static void
9795 wm_serdes_tick(struct wm_softc *sc)
9796 {
9797 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9798 	struct mii_data *mii = &sc->sc_mii;
9799 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9800 	uint32_t reg;
9801 
9802 	KASSERT(WM_CORE_LOCKED(sc));
9803 
9804 	mii->mii_media_status = IFM_AVALID;
9805 	mii->mii_media_active = IFM_ETHER;
9806 
9807 	/* Check PCS */
9808 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
9809 	if ((reg & PCS_LSTS_LINKOK) != 0) {
9810 		mii->mii_media_status |= IFM_ACTIVE;
9811 		sc->sc_tbi_linkup = 1;
9812 		sc->sc_tbi_serdes_ticks = 0;
9813 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
9814 		if ((reg & PCS_LSTS_FDX) != 0)
9815 			mii->mii_media_active |= IFM_FDX;
9816 		else
9817 			mii->mii_media_active |= IFM_HDX;
9818 	} else {
9819 		mii->mii_media_status |= IFM_NONE;
9820 		sc->sc_tbi_linkup = 0;
9821 		/* If the timer expired, retry autonegotiation */
9822 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9823 		    && (++sc->sc_tbi_serdes_ticks
9824 			>= sc->sc_tbi_serdes_anegticks)) {
9825 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9826 			sc->sc_tbi_serdes_ticks = 0;
9827 			/* XXX */
9828 			wm_serdes_mediachange(ifp);
9829 		}
9830 	}
9831 
9832 	wm_tbi_serdes_set_linkled(sc);
9833 }
9834 
9835 /* SFP related */
9836 
9837 static int
9838 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
9839 {
9840 	uint32_t i2ccmd;
9841 	int i;
9842 
9843 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
9844 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9845 
9846 	/* Poll the ready bit */
9847 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9848 		delay(50);
9849 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9850 		if (i2ccmd & I2CCMD_READY)
9851 			break;
9852 	}
9853 	if ((i2ccmd & I2CCMD_READY) == 0)
9854 		return -1;
9855 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9856 		return -1;
9857 
9858 	*data = i2ccmd & 0x00ff;
9859 
9860 	return 0;
9861 }
9862 
9863 static uint32_t
9864 wm_sfp_get_media_type(struct wm_softc *sc)
9865 {
9866 	uint32_t ctrl_ext;
9867 	uint8_t val = 0;
9868 	int timeout = 3;
9869 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
9870 	int rv = -1;
9871 
9872 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9873 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
9874 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
9875 	CSR_WRITE_FLUSH(sc);
9876 
9877 	/* Read SFP module data */
9878 	while (timeout) {
9879 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
9880 		if (rv == 0)
9881 			break;
9882 		delay(100*1000); /* XXX too big */
9883 		timeout--;
9884 	}
9885 	if (rv != 0)
9886 		goto out;
9887 	switch (val) {
9888 	case SFF_SFP_ID_SFF:
9889 		aprint_normal_dev(sc->sc_dev,
9890 		    "Module/Connector soldered to board\n");
9891 		break;
9892 	case SFF_SFP_ID_SFP:
9893 		aprint_normal_dev(sc->sc_dev, "SFP\n");
9894 		break;
9895 	case SFF_SFP_ID_UNKNOWN:
9896 		goto out;
9897 	default:
9898 		break;
9899 	}
9900 
9901 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
9902 	if (rv != 0)
9903 		goto out;
9905 
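	/*
	 * Map the SFF Ethernet compliance flags to a media type:
	 * 1000BASE-SX/LX modules attach as SERDES, 1000BASE-T runs over
	 * SGMII to a copper PHY, and 100BASE-FX also uses SGMII but is
	 * reported as SERDES media.
	 */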
9906 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
9907 		mediatype = WM_MEDIATYPE_SERDES;
9908 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
9909 		sc->sc_flags |= WM_F_SGMII;
9910 		mediatype = WM_MEDIATYPE_COPPER;
9911 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
9912 		sc->sc_flags |= WM_F_SGMII;
9913 		mediatype = WM_MEDIATYPE_SERDES;
9914 	}
9915 
9916 out:
9917 	/* Restore I2C interface setting */
9918 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9919 
9920 	return mediatype;
9921 }
9922 /*
9923  * NVM related.
9924  * Microwire, SPI (w/wo EERD) and Flash.
9925  */
9926 
9927 /* Both spi and uwire */
9928 
9929 /*
9930  * wm_eeprom_sendbits:
9931  *
9932  *	Send a series of bits to the EEPROM.
9933  */
9934 static void
9935 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
9936 {
9937 	uint32_t reg;
9938 	int x;
9939 
9940 	reg = CSR_READ(sc, WMREG_EECD);
9941 
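	/*
	 * Bit-bang the value out MSB first: present each bit on DI,
	 * then pulse SK high and low with short settling delays.
	 */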
9942 	for (x = nbits; x > 0; x--) {
9943 		if (bits & (1U << (x - 1)))
9944 			reg |= EECD_DI;
9945 		else
9946 			reg &= ~EECD_DI;
9947 		CSR_WRITE(sc, WMREG_EECD, reg);
9948 		CSR_WRITE_FLUSH(sc);
9949 		delay(2);
9950 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9951 		CSR_WRITE_FLUSH(sc);
9952 		delay(2);
9953 		CSR_WRITE(sc, WMREG_EECD, reg);
9954 		CSR_WRITE_FLUSH(sc);
9955 		delay(2);
9956 	}
9957 }
9958 
9959 /*
9960  * wm_eeprom_recvbits:
9961  *
9962  *	Receive a series of bits from the EEPROM.
9963  */
9964 static void
9965 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
9966 {
9967 	uint32_t reg, val;
9968 	int x;
9969 
9970 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
9971 
9972 	val = 0;
9973 	for (x = nbits; x > 0; x--) {
9974 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
9975 		CSR_WRITE_FLUSH(sc);
9976 		delay(2);
9977 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
9978 			val |= (1U << (x - 1));
9979 		CSR_WRITE(sc, WMREG_EECD, reg);
9980 		CSR_WRITE_FLUSH(sc);
9981 		delay(2);
9982 	}
9983 	*valp = val;
9984 }
9985 
9986 /* Microwire */
9987 
9988 /*
9989  * wm_nvm_read_uwire:
9990  *
9991  *	Read a word from the EEPROM using the MicroWire protocol.
9992  */
9993 static int
9994 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
9995 {
9996 	uint32_t reg, val;
9997 	int i;
9998 
9999 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10000 		device_xname(sc->sc_dev), __func__));
10001 
10002 	for (i = 0; i < wordcnt; i++) {
10003 		/* Clear SK and DI. */
10004 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10005 		CSR_WRITE(sc, WMREG_EECD, reg);
10006 
10007 		/*
10008 		 * XXX: workaround for a bug in qemu-0.12.x and prior
10009 		 * and Xen.
10010 		 *
10011 		 * We use this workaround only for 82540 because qemu's
10012 		 * e1000 act as 82540.
10013 		 */
10014 		if (sc->sc_type == WM_T_82540) {
10015 			reg |= EECD_SK;
10016 			CSR_WRITE(sc, WMREG_EECD, reg);
10017 			reg &= ~EECD_SK;
10018 			CSR_WRITE(sc, WMREG_EECD, reg);
10019 			CSR_WRITE_FLUSH(sc);
10020 			delay(2);
10021 		}
10022 		/* XXX: end of workaround */
10023 
10024 		/* Set CHIP SELECT. */
10025 		reg |= EECD_CS;
10026 		CSR_WRITE(sc, WMREG_EECD, reg);
10027 		CSR_WRITE_FLUSH(sc);
10028 		delay(2);
10029 
10030 		/* Shift in the READ command. */
10031 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10032 
10033 		/* Shift in address. */
10034 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10035 
10036 		/* Shift out the data. */
10037 		wm_eeprom_recvbits(sc, &val, 16);
10038 		data[i] = val & 0xffff;
10039 
10040 		/* Clear CHIP SELECT. */
10041 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10042 		CSR_WRITE(sc, WMREG_EECD, reg);
10043 		CSR_WRITE_FLUSH(sc);
10044 		delay(2);
10045 	}
10046 
10047 	return 0;
10048 }
10049 
10050 /* SPI */
10051 
10052 /*
10053  * Set SPI and FLASH related information from the EECD register.
10054  * For 82541 and 82547, the word size is taken from EEPROM.
10055  */
10056 static int
10057 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10058 {
10059 	int size;
10060 	uint32_t reg;
10061 	uint16_t data;
10062 
10063 	reg = CSR_READ(sc, WMREG_EECD);
10064 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10065 
10066 	/* Read the size of NVM from EECD by default */
10067 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10068 	switch (sc->sc_type) {
10069 	case WM_T_82541:
10070 	case WM_T_82541_2:
10071 	case WM_T_82547:
10072 	case WM_T_82547_2:
10073 		/* Set dummy value to access EEPROM */
10074 		sc->sc_nvm_wordsize = 64;
10075 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10076 		reg = data;
10077 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10078 		if (size == 0)
10079 			size = 6; /* 64 word size */
10080 		else
10081 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10082 		break;
10083 	case WM_T_80003:
10084 	case WM_T_82571:
10085 	case WM_T_82572:
10086 	case WM_T_82573: /* SPI case */
10087 	case WM_T_82574: /* SPI case */
10088 	case WM_T_82583: /* SPI case */
10089 		size += NVM_WORD_SIZE_BASE_SHIFT;
10090 		if (size > 14)
10091 			size = 14;
10092 		break;
10093 	case WM_T_82575:
10094 	case WM_T_82576:
10095 	case WM_T_82580:
10096 	case WM_T_I350:
10097 	case WM_T_I354:
10098 	case WM_T_I210:
10099 	case WM_T_I211:
10100 		size += NVM_WORD_SIZE_BASE_SHIFT;
10101 		if (size > 15)
10102 			size = 15;
10103 		break;
10104 	default:
10105 		aprint_error_dev(sc->sc_dev,
10106 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10107 		return -1;
10109 	}
10110 
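	/*
	 * The size value computed above is an exponent; e.g. (assuming
	 * NVM_WORD_SIZE_BASE_SHIFT is 6) a raw EECD size field of 2
	 * yields 1 << (2 + 6) = 256 words.
	 */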
10111 	sc->sc_nvm_wordsize = 1 << size;
10112 
10113 	return 0;
10114 }
10115 
10116 /*
10117  * wm_nvm_ready_spi:
10118  *
10119  *	Wait for a SPI EEPROM to be ready for commands.
10120  */
10121 static int
10122 wm_nvm_ready_spi(struct wm_softc *sc)
10123 {
10124 	uint32_t val;
10125 	int usec;
10126 
10127 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10128 		device_xname(sc->sc_dev), __func__));
10129 
10130 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10131 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10132 		wm_eeprom_recvbits(sc, &val, 8);
10133 		if ((val & SPI_SR_RDY) == 0)
10134 			break;
10135 	}
10136 	if (usec >= SPI_MAX_RETRIES) {
10137 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
10138 		return 1;
10139 	}
10140 	return 0;
10141 }
10142 
10143 /*
10144  * wm_nvm_read_spi:
10145  *
10146  *	Read a word from the EEPROM using the SPI protocol.
10147  */
10148 static int
10149 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10150 {
10151 	uint32_t reg, val;
10152 	int i;
10153 	uint8_t opc;
10154 
10155 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10156 		device_xname(sc->sc_dev), __func__));
10157 
10158 	/* Clear SK and CS. */
10159 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10160 	CSR_WRITE(sc, WMREG_EECD, reg);
10161 	CSR_WRITE_FLUSH(sc);
10162 	delay(2);
10163 
10164 	if (wm_nvm_ready_spi(sc))
10165 		return 1;
10166 
10167 	/* Toggle CS to flush commands. */
10168 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10169 	CSR_WRITE_FLUSH(sc);
10170 	delay(2);
10171 	CSR_WRITE(sc, WMREG_EECD, reg);
10172 	CSR_WRITE_FLUSH(sc);
10173 	delay(2);
10174 
10175 	opc = SPI_OPC_READ;
10176 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
10177 		opc |= SPI_OPC_A8;
10178 
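	/*
	 * SPI EEPROM addresses count bytes, so the word address is
	 * shifted left by one; on small 8-bit-address parts the ninth
	 * address bit travels in the A8 opcode bit set above.
	 */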
10179 	wm_eeprom_sendbits(sc, opc, 8);
10180 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10181 
10182 	for (i = 0; i < wordcnt; i++) {
10183 		wm_eeprom_recvbits(sc, &val, 16);
10184 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10185 	}
10186 
10187 	/* Raise CS and clear SK. */
10188 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10189 	CSR_WRITE(sc, WMREG_EECD, reg);
10190 	CSR_WRITE_FLUSH(sc);
10191 	delay(2);
10192 
10193 	return 0;
10194 }
10195 
10196 /* Reading with the EERD register */
10197 
10198 static int
10199 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10200 {
10201 	uint32_t attempts = 100000;
10202 	uint32_t i, reg = 0;
10203 	int32_t done = -1;
10204 
10205 	for (i = 0; i < attempts; i++) {
10206 		reg = CSR_READ(sc, rw);
10207 
10208 		if (reg & EERD_DONE) {
10209 			done = 0;
10210 			break;
10211 		}
10212 		delay(5);
10213 	}
10214 
10215 	return done;
10216 }
10217 
10218 static int
10219 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10220     uint16_t *data)
10221 {
10222 	int i, eerd = 0;
10223 	int error = 0;
10224 
10225 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10226 		device_xname(sc->sc_dev), __func__));
10227 
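	/*
	 * Each word is read by writing the word address plus the START
	 * bit to EERD, polling the DONE bit, and then pulling the data
	 * out of the upper bits of the same register.
	 */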
10228 	for (i = 0; i < wordcnt; i++) {
10229 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10230 
10231 		CSR_WRITE(sc, WMREG_EERD, eerd);
10232 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10233 		if (error != 0)
10234 			break;
10235 
10236 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10237 	}
10238 
10239 	return error;
10240 }
10241 
10242 /* Flash */
10243 
10244 static int
10245 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10246 {
10247 	uint32_t eecd;
10248 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10249 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10250 	uint8_t sig_byte = 0;
10251 
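	/*
	 * The bank signature byte lives in the high byte of word
	 * ICH_NVM_SIG_WORD, hence the "* 2 + 1" in act_offset above; a
	 * bank is valid when the masked byte equals ICH_NVM_SIG_VALUE.
	 */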
10252 	switch (sc->sc_type) {
10253 	case WM_T_PCH_SPT:
10254 		/*
10255 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
10256 		 * sector valid bits from the NVM.
10257 		 */
10258 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10259 		if ((*bank == 0) || (*bank == 1)) {
10260 			aprint_error_dev(sc->sc_dev,
10261 			    "%s: no valid NVM bank present (%u)\n", __func__,
10262 				*bank);
10263 			return -1;
10264 		} else {
10265 			*bank = *bank - 2;
10266 			return 0;
10267 		}
10268 	case WM_T_ICH8:
10269 	case WM_T_ICH9:
10270 		eecd = CSR_READ(sc, WMREG_EECD);
10271 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10272 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10273 			return 0;
10274 		}
10275 		/* FALLTHROUGH */
10276 	default:
10277 		/* Default to 0 */
10278 		*bank = 0;
10279 
10280 		/* Check bank 0 */
10281 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10282 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10283 			*bank = 0;
10284 			return 0;
10285 		}
10286 
10287 		/* Check bank 1 */
10288 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10289 		    &sig_byte);
10290 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10291 			*bank = 1;
10292 			return 0;
10293 		}
10294 	}
10295 
10296 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10297 		device_xname(sc->sc_dev)));
10298 	return -1;
10299 }
10300 
10301 /******************************************************************************
10302  * This function does initial flash setup so that a new read/write/erase cycle
10303  * can be started.
10304  *
10305  * sc - The pointer to the hw structure
10306  ****************************************************************************/
10307 static int32_t
10308 wm_ich8_cycle_init(struct wm_softc *sc)
10309 {
10310 	uint16_t hsfsts;
10311 	int32_t error = 1;
10312 	int32_t i     = 0;
10313 
10314 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10315 
10316 	/* Check the Flash Descriptor Valid bit in HW status */
10317 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10318 		return error;
10319 	}
10320 
10321 	/* Clear FCERR and DAEL in HW status; both are write-1-to-clear */
10323 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10324 
10325 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10326 
10327 	/*
10328 	 * Either we should have a hardware SPI cycle-in-progress bit to
10329 	 * check against in order to start a new cycle, or the FDONE bit
10330 	 * should be changed in the hardware so that it is 1 after hardware
10331 	 * reset, which could then be used to tell whether a cycle is in
10332 	 * progress or has completed.  We should also have some software
10333 	 * semaphore mechanism to guard FDONE or the cycle-in-progress bit
10334 	 * so that accesses to those bits by two threads are serialized, or
10335 	 * some other way to keep two threads from starting a cycle at once.
10336 	 */
10337 
10338 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10339 		/*
10340 		 * There is no cycle running at present, so we can start a
10341 		 * cycle
10342 		 */
10343 
10344 		/* Begin by setting Flash Cycle Done. */
10345 		hsfsts |= HSFSTS_DONE;
10346 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10347 		error = 0;
10348 	} else {
10349 		/*
10350 		 * Otherwise poll for some time so the current cycle has a
10351 		 * chance to end before giving up.
10352 		 */
10353 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10354 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10355 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10356 				error = 0;
10357 				break;
10358 			}
10359 			delay(1);
10360 		}
10361 		if (error == 0) {
10362 			/*
10363 			 * The previous cycle finished within the timeout;
10364 			 * now set the Flash Cycle Done.
10365 			 */
10366 			hsfsts |= HSFSTS_DONE;
10367 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10368 		}
10369 	}
10370 	return error;
10371 }
10372 
10373 /******************************************************************************
10374  * This function starts a flash cycle and waits for its completion
10375  *
10376  * sc - The pointer to the hw structure
10377  ****************************************************************************/
10378 static int32_t
10379 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10380 {
10381 	uint16_t hsflctl;
10382 	uint16_t hsfsts;
10383 	int32_t error = 1;
10384 	uint32_t i = 0;
10385 
10386 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10387 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10388 	hsflctl |= HSFCTL_GO;
10389 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10390 
10391 	/* Wait till FDONE bit is set to 1 */
10392 	do {
10393 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10394 		if (hsfsts & HSFSTS_DONE)
10395 			break;
10396 		delay(1);
10397 		i++;
10398 	} while (i < timeout);
10399 	if ((hsfsts & HSFSTS_DONE) != 0 && (hsfsts & HSFSTS_ERR) == 0)
10400 		error = 0;
10401 
10402 	return error;
10403 }
10404 
10405 /******************************************************************************
10406  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10407  *
10408  * sc - The pointer to the hw structure
10409  * index - The index of the byte or word to read.
10410  * size - Size of data to read, 1=byte 2=word, 4=dword
10411  * data - Pointer to the word to store the value read.
10412  *****************************************************************************/
10413 static int32_t
10414 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10415     uint32_t size, uint32_t *data)
10416 {
10417 	uint16_t hsfsts;
10418 	uint16_t hsflctl;
10419 	uint32_t flash_linear_address;
10420 	uint32_t flash_data = 0;
10421 	int32_t error = 1;
10422 	int32_t count = 0;
10423 
10424 	if (size < 1 || size > 4 || data == NULL ||
10425 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10426 		return error;
10427 
10428 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10429 	    sc->sc_ich8_flash_base;
10430 
10431 	do {
10432 		delay(1);
10433 		/* Steps */
10434 		error = wm_ich8_cycle_init(sc);
10435 		if (error)
10436 			break;
10437 
10438 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10439 		/* The byte count field encodes size - 1 (0 = 1 byte, 3 = 4 bytes) */
10440 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10441 		    & HSFCTL_BCOUNT_MASK;
10442 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10443 		if (sc->sc_type == WM_T_PCH_SPT) {
10444 			/*
10445 			 * In SPT, this register is in LAN memory space, not
10446 			 * flash.  Therefore, only 32-bit access is supported.
10447 			 */
10448 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10449 			    (uint32_t)hsflctl);
10450 		} else
10451 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10452 
10453 		/*
10454 		 * Write the last 24 bits of index into Flash Linear address
10455 		 * field in Flash Address
10456 		 */
10457 		/* TODO: TBD maybe check the index against the size of flash */
10458 
10459 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10460 
10461 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10462 
10463 		/*
10464 		 * Check whether FCERR is set; if it is, clear it and try
10465 		 * the whole sequence a few more times.  Otherwise read in
10466 		 * (shift in) the Flash Data0; the order is least
10467 		 * significant byte first.
10468 		 */
10469 		if (error == 0) {
10470 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10471 			if (size == 1)
10472 				*data = (uint8_t)(flash_data & 0x000000FF);
10473 			else if (size == 2)
10474 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10475 			else if (size == 4)
10476 				*data = (uint32_t)flash_data;
10477 			break;
10478 		} else {
10479 			/*
10480 			 * If we've gotten here, then things are probably
10481 			 * completely hosed, but if the error condition is
10482 			 * detected, it won't hurt to give it another try...
10483 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10484 			 */
10485 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10486 			if (hsfsts & HSFSTS_ERR) {
10487 				/* Repeat for some time before giving up. */
10488 				continue;
10489 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10490 				break;
10491 		}
10492 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10493 
10494 	return error;
10495 }
10496 
10497 /******************************************************************************
10498  * Reads a single byte from the NVM using the ICH8 flash access registers.
10499  *
10500  * sc - pointer to wm_hw structure
10501  * index - The index of the byte to read.
10502  * data - Pointer to a byte to store the value read.
10503  *****************************************************************************/
10504 static int32_t
10505 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10506 {
10507 	int32_t status;
10508 	uint32_t word = 0;
10509 
10510 	status = wm_read_ich8_data(sc, index, 1, &word);
10511 	if (status == 0)
10512 		*data = (uint8_t)word;
10513 	else
10514 		*data = 0;
10515 
10516 	return status;
10517 }
10518 
10519 /******************************************************************************
10520  * Reads a word from the NVM using the ICH8 flash access registers.
10521  *
10522  * sc - pointer to wm_hw structure
10523  * index - The starting byte index of the word to read.
10524  * data - Pointer to a word to store the value read.
10525  *****************************************************************************/
10526 static int32_t
10527 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10528 {
10529 	int32_t status;
10530 	uint32_t word = 0;
10531 
10532 	status = wm_read_ich8_data(sc, index, 2, &word);
10533 	if (status == 0)
10534 		*data = (uint16_t)word;
10535 	else
10536 		*data = 0;
10537 
10538 	return status;
10539 }
10540 
10541 /******************************************************************************
10542  * Reads a dword from the NVM using the ICH8 flash access registers.
10543  *
10544  * sc - pointer to wm_hw structure
10545  * index - The starting byte index of the word to read.
10546  * data - Pointer to a word to store the value read.
10547  *****************************************************************************/
10548 static int32_t
10549 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10550 {
10551 	int32_t status;
10552 
10553 	status = wm_read_ich8_data(sc, index, 4, data);
10554 	return status;
10555 }
10556 
10557 /******************************************************************************
10558  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10559  * register.
10560  *
10561  * sc - Struct containing variables accessed by shared code
10562  * offset - offset of word in the EEPROM to read
10563  * data - word read from the EEPROM
10564  * words - number of words to read
10565  *****************************************************************************/
10566 static int
10567 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10568 {
10569 	int32_t  error = 0;
10570 	uint32_t flash_bank = 0;
10571 	uint32_t act_offset = 0;
10572 	uint32_t bank_offset = 0;
10573 	uint16_t word = 0;
10574 	uint16_t i = 0;
10575 
10576 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10577 		device_xname(sc->sc_dev), __func__));
10578 
10579 	/*
10580 	 * We need to know which is the valid flash bank.  In the event
10581 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10582 	 * managing flash_bank.  So it cannot be trusted and needs
10583 	 * to be updated with each read.
10584 	 */
10585 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10586 	if (error) {
10587 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10588 			device_xname(sc->sc_dev)));
10589 		flash_bank = 0;
10590 	}
10591 
10592 	/*
10593 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10594 	 * size
10595 	 */
10596 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10597 
10598 	error = wm_get_swfwhw_semaphore(sc);
10599 	if (error) {
10600 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10601 		    __func__);
10602 		return error;
10603 	}
10604 
10605 	for (i = 0; i < words; i++) {
10606 		/* The NVM part needs a byte offset, hence * 2 */
10607 		act_offset = bank_offset + ((offset + i) * 2);
10608 		error = wm_read_ich8_word(sc, act_offset, &word);
10609 		if (error) {
10610 			aprint_error_dev(sc->sc_dev,
10611 			    "%s: failed to read NVM\n", __func__);
10612 			break;
10613 		}
10614 		data[i] = word;
10615 	}
10616 
10617 	wm_put_swfwhw_semaphore(sc);
10618 	return error;
10619 }
10620 
10621 /******************************************************************************
10622  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
10623  * register.
10624  *
10625  * sc - Struct containing variables accessed by shared code
10626  * offset - offset of word in the EEPROM to read
10627  * data - word read from the EEPROM
10628  * words - number of words to read
10629  *****************************************************************************/
10630 static int
10631 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
10632 {
10633 	int32_t  error = 0;
10634 	uint32_t flash_bank = 0;
10635 	uint32_t act_offset = 0;
10636 	uint32_t bank_offset = 0;
10637 	uint32_t dword = 0;
10638 	uint16_t i = 0;
10639 
10640 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10641 		device_xname(sc->sc_dev), __func__));
10642 
10643 	/*
10644 	 * We need to know which is the valid flash bank.  In the event
10645 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10646 	 * managing flash_bank.  So it cannot be trusted and needs
10647 	 * to be updated with each read.
10648 	 */
10649 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10650 	if (error) {
10651 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10652 			device_xname(sc->sc_dev)));
10653 		flash_bank = 0;
10654 	}
10655 
10656 	/*
10657 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10658 	 * size
10659 	 */
10660 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10661 
10662 	error = wm_get_swfwhw_semaphore(sc);
10663 	if (error) {
10664 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10665 		    __func__);
10666 		return error;
10667 	}
10668 
10669 	for (i = 0; i < words; i++) {
10670 		/* The NVM part needs a byte offset, hence * 2 */
10671 		act_offset = bank_offset + ((offset + i) * 2);
10672 		/* but we must read dword aligned, so mask ... */
10673 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
10674 		if (error) {
10675 			aprint_error_dev(sc->sc_dev,
10676 			    "%s: failed to read NVM\n", __func__);
10677 			break;
10678 		}
10679 		/* ... and pick out low or high word */
10680 		if ((act_offset & 0x2) == 0)
10681 			data[i] = (uint16_t)(dword & 0xFFFF);
10682 		else
10683 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
10684 	}
10685 
10686 	wm_put_swfwhw_semaphore(sc);
10687 	return error;
10688 }
10689 
10690 /* iNVM */
10691 
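/*
 * wm_nvm_read_word_invm:
 *
 *	Read a word from the I210/I211 iNVM (integrated NVM; an OTP area)
 *	by walking the autoload records until a word autoload record for
 *	the requested address is found.
 */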
10692 static int
10693 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
10694 {
	int32_t  rv = -1;	/* Fail if the address is not found */
10696 	uint32_t invm_dword;
10697 	uint16_t i;
10698 	uint8_t record_type, word_address;
10699 
10700 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10701 		device_xname(sc->sc_dev), __func__));
10702 
10703 	for (i = 0; i < INVM_SIZE; i++) {
10704 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
10705 		/* Get record type */
10706 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
10707 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
10708 			break;
10709 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
10710 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
10711 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
10712 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
10713 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
10714 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
10715 			if (word_address == address) {
10716 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
10717 				rv = 0;
10718 				break;
10719 			}
10720 		}
10721 	}
10722 
10723 	return rv;
10724 }
10725 
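/*
 * wm_nvm_read_invm:
 *
 *	Read words from the iNVM.  Only some well-known offsets are mapped;
 *	for several of them a documented default value is substituted when
 *	the word has not been programmed.
 */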
10726 static int
10727 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
10728 {
10729 	int rv = 0;
10730 	int i;
10731 
10732 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10733 		device_xname(sc->sc_dev), __func__));
10734 
10735 	for (i = 0; i < words; i++) {
10736 		switch (offset + i) {
10737 		case NVM_OFF_MACADDR:
10738 		case NVM_OFF_MACADDR1:
10739 		case NVM_OFF_MACADDR2:
10740 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
10741 			if (rv != 0) {
10742 				data[i] = 0xffff;
10743 				rv = -1;
10744 			}
10745 			break;
		case NVM_OFF_CFG2:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_INIT_CTRL_2_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_CFG4:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_INIT_CTRL_4_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_1_CFG:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_LED_1_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_LED_0_2_CFG:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = NVM_LED_0_2_CFG_DEFAULT_I211;
				rv = 0;
			}
			break;
		case NVM_OFF_ID_LED_SETTINGS:
			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
			if (rv != 0) {
				data[i] = ID_LED_RESERVED_FFFF;
				rv = 0;
			}
			break;
		default:
			DPRINTF(WM_DEBUG_NVM,
			    ("NVM word 0x%02x is not mapped.\n", offset + i));
			data[i] = NVM_RESERVED_WORD;
			break;
10786 		}
10787 	}
10788 
10789 	return rv;
10790 }
10791 
10792 /* Lock, detecting NVM type, validate checksum, version and read */
10793 
10794 /*
10795  * wm_nvm_acquire:
10796  *
10797  *	Perform the EEPROM handshake required on some chips.
10798  */
10799 static int
10800 wm_nvm_acquire(struct wm_softc *sc)
10801 {
10802 	uint32_t reg;
10803 	int x;
10804 	int ret = 0;
10805 
10806 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10807 		device_xname(sc->sc_dev), __func__));
10808 
10809 	if (sc->sc_type >= WM_T_ICH8) {
10810 		ret = wm_get_nvm_ich8lan(sc);
10811 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
10812 		ret = wm_get_swfwhw_semaphore(sc);
10813 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
10814 		/* This will also do wm_get_swsm_semaphore() if needed */
10815 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
10816 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
10817 		ret = wm_get_swsm_semaphore(sc);
10818 	}
10819 
10820 	if (ret) {
10821 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10822 			__func__);
10823 		return 1;
10824 	}
10825 
10826 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10827 		reg = CSR_READ(sc, WMREG_EECD);
10828 
10829 		/* Request EEPROM access. */
10830 		reg |= EECD_EE_REQ;
10831 		CSR_WRITE(sc, WMREG_EECD, reg);
10832 
10833 		/* ..and wait for it to be granted. */
10834 		for (x = 0; x < 1000; x++) {
10835 			reg = CSR_READ(sc, WMREG_EECD);
10836 			if (reg & EECD_EE_GNT)
10837 				break;
10838 			delay(5);
10839 		}
10840 		if ((reg & EECD_EE_GNT) == 0) {
10841 			aprint_error_dev(sc->sc_dev,
10842 			    "could not acquire EEPROM GNT\n");
10843 			reg &= ~EECD_EE_REQ;
10844 			CSR_WRITE(sc, WMREG_EECD, reg);
10845 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10846 				wm_put_swfwhw_semaphore(sc);
10847 			if (sc->sc_flags & WM_F_LOCK_SWFW)
10848 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10849 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
10850 				wm_put_swsm_semaphore(sc);
10851 			return 1;
10852 		}
10853 	}
10854 
10855 	return 0;
10856 }
10857 
10858 /*
10859  * wm_nvm_release:
10860  *
10861  *	Release the EEPROM mutex.
10862  */
10863 static void
10864 wm_nvm_release(struct wm_softc *sc)
10865 {
10866 	uint32_t reg;
10867 
10868 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10869 		device_xname(sc->sc_dev), __func__));
10870 
10871 	if (sc->sc_flags & WM_F_LOCK_EECD) {
10872 		reg = CSR_READ(sc, WMREG_EECD);
10873 		reg &= ~EECD_EE_REQ;
10874 		CSR_WRITE(sc, WMREG_EECD, reg);
10875 	}
10876 
10877 	if (sc->sc_type >= WM_T_ICH8) {
10878 		wm_put_nvm_ich8lan(sc);
10879 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
10880 		wm_put_swfwhw_semaphore(sc);
	else if (sc->sc_flags & WM_F_LOCK_SWFW)
10882 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
10883 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
10884 		wm_put_swsm_semaphore(sc);
10885 }
10886 
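/*
 * wm_nvm_is_onboard_eeprom:
 *
 *	Check whether the NVM is an on-board EEPROM.  On 82573/82574/82583,
 *	EECD bits 15 and 16 both set means the device uses Flash instead.
 */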
10887 static int
10888 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
10889 {
10890 	uint32_t eecd = 0;
10891 
10892 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
10893 	    || sc->sc_type == WM_T_82583) {
10894 		eecd = CSR_READ(sc, WMREG_EECD);
10895 
10896 		/* Isolate bits 15 & 16 */
10897 		eecd = ((eecd >> 15) & 0x03);
10898 
10899 		/* If both bits are set, device is Flash type */
10900 		if (eecd == 0x03)
10901 			return 0;
10902 	}
10903 	return 1;
10904 }
10905 
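/*
 * wm_nvm_get_flash_presence_i210:
 *
 *	Return 1 if the I210 has an external flash attached
 *	(EEC_FLASH_DETECTED is set), 0 if it runs from the iNVM only.
 */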
10906 static int
10907 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
10908 {
10909 	uint32_t eec;
10910 
10911 	eec = CSR_READ(sc, WMREG_EEC);
10912 	if ((eec & EEC_FLASH_DETECTED) != 0)
10913 		return 1;
10914 
10915 	return 0;
10916 }
10917 
/*
 * wm_nvm_validate_checksum:
 *
 *	The checksum is defined as the sum of the first 64 (16 bit) words,
 *	which must equal NVM_CHECKSUM.  A mismatch is reported only under
 *	WM_DEBUG; the image is accepted either way.
 */
10923 static int
10924 wm_nvm_validate_checksum(struct wm_softc *sc)
10925 {
10926 	uint16_t checksum;
10927 	uint16_t eeprom_data;
10928 #ifdef WM_DEBUG
10929 	uint16_t csum_wordaddr, valid_checksum;
10930 #endif
10931 	int i;
10932 
10933 	checksum = 0;
10934 
10935 	/* Don't check for I211 */
10936 	if (sc->sc_type == WM_T_I211)
10937 		return 0;
10938 
10939 #ifdef WM_DEBUG
10940 	if (sc->sc_type == WM_T_PCH_LPT) {
10941 		csum_wordaddr = NVM_OFF_COMPAT;
10942 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
10943 	} else {
10944 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
10945 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
10946 	}
10947 
10948 	/* Dump EEPROM image for debug */
10949 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
10950 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
10951 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
10952 		/* XXX PCH_SPT? */
10953 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
10954 		if ((eeprom_data & valid_checksum) == 0) {
10955 			DPRINTF(WM_DEBUG_NVM,
10956 			    ("%s: NVM need to be updated (%04x != %04x)\n",
10957 				device_xname(sc->sc_dev), eeprom_data,
10958 				    valid_checksum));
10959 		}
10960 	}
10961 
10962 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
10963 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
10964 		for (i = 0; i < NVM_SIZE; i++) {
10965 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
10966 				printf("XXXX ");
10967 			else
10968 				printf("%04hx ", eeprom_data);
10969 			if (i % 8 == 7)
10970 				printf("\n");
10971 		}
10972 	}
10973 
10974 #endif /* WM_DEBUG */
10975 
10976 	for (i = 0; i < NVM_SIZE; i++) {
10977 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
10978 			return 1;
10979 		checksum += eeprom_data;
10980 	}
10981 
10982 	if (checksum != (uint16_t) NVM_CHECKSUM) {
10983 #ifdef WM_DEBUG
10984 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
10985 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
10986 #endif
10987 	}
10988 
10989 	return 0;
10990 }
10991 
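/*
 * wm_nvm_version_invm:
 *
 *	Get the iNVM image version from word 61 of the iNVM data registers.
 */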
10992 static void
10993 wm_nvm_version_invm(struct wm_softc *sc)
10994 {
10995 	uint32_t dword;
10996 
10997 	/*
	 * Linux's code to decode the version is very strange, so we don't
	 * follow that algorithm and just use word 61 as the document
	 * describes.  Perhaps it's not perfect, though...
11001 	 *
11002 	 * Example:
11003 	 *
11004 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11005 	 */
11006 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11007 	dword = __SHIFTOUT(dword, INVM_VER_1);
11008 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11009 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11010 }
11011 
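/*
 * wm_nvm_version:
 *
 *	Decode the NVM image version, and the option ROM version and image
 *	unique ID where present, for the verbose attach message.
 */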
11012 static void
11013 wm_nvm_version(struct wm_softc *sc)
11014 {
11015 	uint16_t major, minor, build, patch;
11016 	uint16_t uid0, uid1;
11017 	uint16_t nvm_data;
11018 	uint16_t off;
11019 	bool check_version = false;
11020 	bool check_optionrom = false;
11021 	bool have_build = false;
11022 
11023 	/*
11024 	 * Version format:
11025 	 *
11026 	 * XYYZ
11027 	 * X0YZ
11028 	 * X0YY
11029 	 *
11030 	 * Example:
11031 	 *
11032 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
11033 	 *	82571	0x50a6	5.10.6?
11034 	 *	82572	0x506a	5.6.10?
11035 	 *	82572EI	0x5069	5.6.9?
11036 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
11037 	 *		0x2013	2.1.3?
	 *	82583	0x10a0	1.10.0? (document says it's the default value)
11039 	 */
11040 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11041 	switch (sc->sc_type) {
11042 	case WM_T_82571:
11043 	case WM_T_82572:
11044 	case WM_T_82574:
11045 	case WM_T_82583:
11046 		check_version = true;
11047 		check_optionrom = true;
11048 		have_build = true;
11049 		break;
11050 	case WM_T_82575:
11051 	case WM_T_82576:
11052 	case WM_T_82580:
11053 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11054 			check_version = true;
11055 		break;
11056 	case WM_T_I211:
11057 		wm_nvm_version_invm(sc);
11058 		goto printver;
11059 	case WM_T_I210:
11060 		if (!wm_nvm_get_flash_presence_i210(sc)) {
11061 			wm_nvm_version_invm(sc);
11062 			goto printver;
11063 		}
11064 		/* FALLTHROUGH */
11065 	case WM_T_I350:
11066 	case WM_T_I354:
11067 		check_version = true;
11068 		check_optionrom = true;
11069 		break;
11070 	default:
11071 		return;
11072 	}
11073 	if (check_version) {
11074 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11075 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11076 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11077 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11078 			build = nvm_data & NVM_BUILD_MASK;
11079 			have_build = true;
11080 		} else
11081 			minor = nvm_data & 0x00ff;
11082 
		/* Convert minor from BCD-style hex to decimal */
11084 		minor = (minor / 16) * 10 + (minor % 16);
11085 		sc->sc_nvm_ver_major = major;
11086 		sc->sc_nvm_ver_minor = minor;
11087 
11088 printver:
11089 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11090 		    sc->sc_nvm_ver_minor);
11091 		if (have_build) {
11092 			sc->sc_nvm_ver_build = build;
11093 			aprint_verbose(".%d", build);
11094 		}
11095 	}
11096 	if (check_optionrom) {
11097 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11098 		/* Option ROM Version */
11099 		if ((off != 0x0000) && (off != 0xffff)) {
11100 			off += NVM_COMBO_VER_OFF;
11101 			wm_nvm_read(sc, off + 1, 1, &uid1);
11102 			wm_nvm_read(sc, off, 1, &uid0);
11103 			if ((uid0 != 0) && (uid0 != 0xffff)
11104 			    && (uid1 != 0) && (uid1 != 0xffff)) {
11105 				/* 16bits */
11106 				major = uid0 >> 8;
11107 				build = (uid0 << 8) | (uid1 >> 8);
11108 				patch = uid1 & 0x00ff;
11109 				aprint_verbose(", option ROM Version %d.%d.%d",
11110 				    major, build, patch);
11111 			}
11112 		}
11113 	}
11114 
11115 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11116 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11117 }
11118 
11119 /*
11120  * wm_nvm_read:
11121  *
11122  *	Read data from the serial EEPROM.
11123  */
11124 static int
11125 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11126 {
11127 	int rv;
11128 
11129 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11130 		device_xname(sc->sc_dev), __func__));
11131 
11132 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
11133 		return 1;
11134 
11135 	if (wm_nvm_acquire(sc))
11136 		return 1;
11137 
11138 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11139 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11140 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11141 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11142 	else if (sc->sc_type == WM_T_PCH_SPT)
11143 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11144 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
11145 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11146 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11147 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11148 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
11149 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11150 	else
11151 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11152 
11153 	wm_nvm_release(sc);
11154 	return rv;
11155 }
11156 
11157 /*
11158  * Hardware semaphores.
 * Very complex...
11160  */
11161 
11162 static int
11163 wm_get_null(struct wm_softc *sc)
11164 {
11165 
11166 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11167 		device_xname(sc->sc_dev), __func__));
11168 	return 0;
11169 }
11170 
11171 static void
11172 wm_put_null(struct wm_softc *sc)
11173 {
11174 
11175 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11176 		device_xname(sc->sc_dev), __func__));
11177 	return;
11178 }
11179 
11180 /*
11181  * Get hardware semaphore.
11182  * Same as e1000_get_hw_semaphore_generic()
11183  */
11184 static int
11185 wm_get_swsm_semaphore(struct wm_softc *sc)
11186 {
11187 	int32_t timeout;
11188 	uint32_t swsm;
11189 
11190 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11191 		device_xname(sc->sc_dev), __func__));
11192 	KASSERT(sc->sc_nvm_wordsize > 0);
11193 
11194 	/* Get the SW semaphore. */
11195 	timeout = sc->sc_nvm_wordsize + 1;
11196 	while (timeout) {
11197 		swsm = CSR_READ(sc, WMREG_SWSM);
11198 
11199 		if ((swsm & SWSM_SMBI) == 0)
11200 			break;
11201 
11202 		delay(50);
11203 		timeout--;
11204 	}
11205 
11206 	if (timeout == 0) {
11207 		aprint_error_dev(sc->sc_dev,
11208 		    "could not acquire SWSM SMBI\n");
11209 		return 1;
11210 	}
11211 
11212 	/* Get the FW semaphore. */
11213 	timeout = sc->sc_nvm_wordsize + 1;
11214 	while (timeout) {
11215 		swsm = CSR_READ(sc, WMREG_SWSM);
11216 		swsm |= SWSM_SWESMBI;
11217 		CSR_WRITE(sc, WMREG_SWSM, swsm);
11218 		/* If we managed to set the bit we got the semaphore. */
11219 		swsm = CSR_READ(sc, WMREG_SWSM);
11220 		if (swsm & SWSM_SWESMBI)
11221 			break;
11222 
11223 		delay(50);
11224 		timeout--;
11225 	}
11226 
11227 	if (timeout == 0) {
11228 		aprint_error_dev(sc->sc_dev,
11229 		    "could not acquire SWSM SWESMBI\n");
11230 		/* Release semaphores */
11231 		wm_put_swsm_semaphore(sc);
11232 		return 1;
11233 	}
11234 	return 0;
11235 }
11236 
11237 /*
11238  * Put hardware semaphore.
11239  * Same as e1000_put_hw_semaphore_generic()
11240  */
11241 static void
11242 wm_put_swsm_semaphore(struct wm_softc *sc)
11243 {
11244 	uint32_t swsm;
11245 
11246 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11247 		device_xname(sc->sc_dev), __func__));
11248 
11249 	swsm = CSR_READ(sc, WMREG_SWSM);
11250 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11251 	CSR_WRITE(sc, WMREG_SWSM, swsm);
11252 }
11253 
11254 /*
11255  * Get SW/FW semaphore.
11256  * Same as e1000_acquire_swfw_sync_82575().
11257  */
11258 static int
11259 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11260 {
11261 	uint32_t swfw_sync;
11262 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11263 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
	int timeout;
11265 
11266 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11267 		device_xname(sc->sc_dev), __func__));
11268 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11269 
11270 	for (timeout = 0; timeout < 200; timeout++) {
11271 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
11272 			if (wm_get_swsm_semaphore(sc)) {
11273 				aprint_error_dev(sc->sc_dev,
11274 				    "%s: failed to get semaphore\n",
11275 				    __func__);
11276 				return 1;
11277 			}
11278 		}
11279 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11280 		if ((swfw_sync & (swmask | fwmask)) == 0) {
11281 			swfw_sync |= swmask;
11282 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11283 			if (sc->sc_flags & WM_F_LOCK_SWSM)
11284 				wm_put_swsm_semaphore(sc);
11285 			return 0;
11286 		}
11287 		if (sc->sc_flags & WM_F_LOCK_SWSM)
11288 			wm_put_swsm_semaphore(sc);
11289 		delay(5000);
11290 	}
11291 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11292 	    device_xname(sc->sc_dev), mask, swfw_sync);
11293 	return 1;
11294 }
11295 
11296 static void
11297 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11298 {
11299 	uint32_t swfw_sync;
11300 
11301 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11302 		device_xname(sc->sc_dev), __func__));
11303 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11304 
11305 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11306 		while (wm_get_swsm_semaphore(sc) != 0)
11307 			continue;
11308 	}
11309 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11310 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11311 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11312 	if (sc->sc_flags & WM_F_LOCK_SWSM)
11313 		wm_put_swsm_semaphore(sc);
11314 }
11315 
11316 static int
11317 wm_get_phy_82575(struct wm_softc *sc)
11318 {
11319 
11320 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11321 		device_xname(sc->sc_dev), __func__));
11322 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11323 }
11324 
11325 static void
11326 wm_put_phy_82575(struct wm_softc *sc)
11327 {
11328 
11329 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11330 		device_xname(sc->sc_dev), __func__));
11331 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11332 }
11333 
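/*
 * Get the SW/FW/HW semaphore by setting EXTCNFCTR.MDIO_SW_OWNERSHIP.
 * On ICH8 and newer this single lock serializes both PHY and NVM access.
 */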
11334 static int
11335 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11336 {
11337 	uint32_t ext_ctrl;
	int timeout;
11339 
11340 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11341 		device_xname(sc->sc_dev), __func__));
11342 
11343 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11344 	for (timeout = 0; timeout < 200; timeout++) {
11345 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11346 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11347 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11348 
11349 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11350 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11351 			return 0;
11352 		delay(5000);
11353 	}
11354 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11355 	    device_xname(sc->sc_dev), ext_ctrl);
11356 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11357 	return 1;
11358 }
11359 
11360 static void
11361 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11362 {
11363 	uint32_t ext_ctrl;
11364 
11365 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11366 		device_xname(sc->sc_dev), __func__));
11367 
11368 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11369 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11370 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11371 
11372 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11373 }
11374 
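/*
 * Get the SW flag (EXTCNFCTR.MDIO_SW_OWNERSHIP) for ICH8 variants:
 * wait for any other owner to release the bit, then claim it and
 * verify that it stuck.
 */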
11375 static int
11376 wm_get_swflag_ich8lan(struct wm_softc *sc)
11377 {
11378 	uint32_t ext_ctrl;
11379 	int timeout;
11380 
11381 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11382 		device_xname(sc->sc_dev), __func__));
11383 	mutex_enter(sc->sc_ich_phymtx);
11384 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
11385 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11386 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
11387 			break;
11388 		delay(1000);
11389 	}
11390 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
11391 		printf("%s: SW has already locked the resource\n",
11392 		    device_xname(sc->sc_dev));
11393 		goto out;
11394 	}
11395 
11396 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11397 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11398 	for (timeout = 0; timeout < 1000; timeout++) {
11399 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11400 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11401 			break;
11402 		delay(1000);
11403 	}
11404 	if (timeout >= 1000) {
11405 		printf("%s: failed to acquire semaphore\n",
11406 		    device_xname(sc->sc_dev));
11407 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11408 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11409 		goto out;
11410 	}
11411 	return 0;
11412 
11413 out:
11414 	mutex_exit(sc->sc_ich_phymtx);
11415 	return 1;
11416 }
11417 
11418 static void
11419 wm_put_swflag_ich8lan(struct wm_softc *sc)
11420 {
11421 	uint32_t ext_ctrl;
11422 
11423 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11424 		device_xname(sc->sc_dev), __func__));
11425 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11426 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
11427 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11428 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11429 	} else {
11430 		printf("%s: Semaphore unexpectedly released\n",
11431 		    device_xname(sc->sc_dev));
11432 	}
11433 
11434 	mutex_exit(sc->sc_ich_phymtx);
11435 }
11436 
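/*
 * Get the NVM lock for ICH8 variants.  Access is serialized with a
 * driver mutex only; no handshake with the hardware is done here.
 */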
11437 static int
11438 wm_get_nvm_ich8lan(struct wm_softc *sc)
11439 {
11440 
11441 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11442 		device_xname(sc->sc_dev), __func__));
11443 	mutex_enter(sc->sc_ich_nvmmtx);
11444 
11445 	return 0;
11446 }
11447 
11448 static void
11449 wm_put_nvm_ich8lan(struct wm_softc *sc)
11450 {
11451 
11452 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11453 		device_xname(sc->sc_dev), __func__));
11454 	mutex_exit(sc->sc_ich_nvmmtx);
11455 }
11456 
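/*
 * Get the 82573 family's hardware semaphore
 * (EXTCNFCTR.MDIO_SW_OWNERSHIP), retrying until the ownership bit
 * sticks or the timeout expires.
 */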
11457 static int
11458 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11459 {
11460 	int i = 0;
11461 	uint32_t reg;
11462 
11463 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11464 		device_xname(sc->sc_dev), __func__));
11465 
11466 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11467 	do {
11468 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
11469 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11470 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11471 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11472 			break;
11473 		delay(2*1000);
11474 		i++;
11475 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11476 
11477 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11478 		wm_put_hw_semaphore_82573(sc);
11479 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
11480 		    device_xname(sc->sc_dev));
11481 		return -1;
11482 	}
11483 
11484 	return 0;
11485 }
11486 
11487 static void
11488 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11489 {
11490 	uint32_t reg;
11491 
11492 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11493 		device_xname(sc->sc_dev), __func__));
11494 
11495 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11496 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11497 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11498 }
11499 
11500 /*
11501  * Management mode and power management related subroutines.
11502  * BMC, AMT, suspend/resume and EEE.
11503  */
11504 
11505 #ifdef WM_WOL
11506 static int
11507 wm_check_mng_mode(struct wm_softc *sc)
11508 {
11509 	int rv;
11510 
11511 	switch (sc->sc_type) {
11512 	case WM_T_ICH8:
11513 	case WM_T_ICH9:
11514 	case WM_T_ICH10:
11515 	case WM_T_PCH:
11516 	case WM_T_PCH2:
11517 	case WM_T_PCH_LPT:
11518 	case WM_T_PCH_SPT:
11519 		rv = wm_check_mng_mode_ich8lan(sc);
11520 		break;
11521 	case WM_T_82574:
11522 	case WM_T_82583:
11523 		rv = wm_check_mng_mode_82574(sc);
11524 		break;
11525 	case WM_T_82571:
11526 	case WM_T_82572:
11527 	case WM_T_82573:
11528 	case WM_T_80003:
11529 		rv = wm_check_mng_mode_generic(sc);
11530 		break;
11531 	default:
		/* nothing to do */
11533 		rv = 0;
11534 		break;
11535 	}
11536 
11537 	return rv;
11538 }
11539 
11540 static int
11541 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11542 {
11543 	uint32_t fwsm;
11544 
11545 	fwsm = CSR_READ(sc, WMREG_FWSM);
11546 
11547 	if (((fwsm & FWSM_FW_VALID) != 0)
11548 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11549 		return 1;
11550 
11551 	return 0;
11552 }
11553 
11554 static int
11555 wm_check_mng_mode_82574(struct wm_softc *sc)
11556 {
11557 	uint16_t data;
11558 
11559 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11560 
11561 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11562 		return 1;
11563 
11564 	return 0;
11565 }
11566 
11567 static int
11568 wm_check_mng_mode_generic(struct wm_softc *sc)
11569 {
11570 	uint32_t fwsm;
11571 
11572 	fwsm = CSR_READ(sc, WMREG_FWSM);
11573 
11574 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11575 		return 1;
11576 
11577 	return 0;
11578 }
11579 #endif /* WM_WOL */
11580 
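/*
 * wm_enable_mng_pass_thru:
 *
 *	Return 1 if management pass-through (the BMC receiving traffic
 *	through the host interface) should be enabled, 0 otherwise.
 *	Roughly equivalent to e1000_enable_mng_pass_thru().
 */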
11581 static int
11582 wm_enable_mng_pass_thru(struct wm_softc *sc)
11583 {
11584 	uint32_t manc, fwsm, factps;
11585 
11586 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11587 		return 0;
11588 
11589 	manc = CSR_READ(sc, WMREG_MANC);
11590 
11591 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11592 		device_xname(sc->sc_dev), manc));
11593 	if ((manc & MANC_RECV_TCO_EN) == 0)
11594 		return 0;
11595 
11596 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11597 		fwsm = CSR_READ(sc, WMREG_FWSM);
11598 		factps = CSR_READ(sc, WMREG_FACTPS);
11599 		if (((factps & FACTPS_MNGCG) == 0)
11600 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11601 			return 1;
	} else if ((sc->sc_type == WM_T_82574)
	    || (sc->sc_type == WM_T_82583)) {
11603 		uint16_t data;
11604 
11605 		factps = CSR_READ(sc, WMREG_FACTPS);
11606 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11607 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11608 			device_xname(sc->sc_dev), factps, data));
11609 		if (((factps & FACTPS_MNGCG) == 0)
11610 		    && ((data & NVM_CFG2_MNGM_MASK)
11611 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11612 			return 1;
11613 	} else if (((manc & MANC_SMBUS_EN) != 0)
11614 	    && ((manc & MANC_ASF_EN) == 0))
11615 		return 1;
11616 
11617 	return 0;
11618 }
11619 
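/*
 * wm_phy_resetisblocked:
 *
 *	Check whether the firmware is currently blocking a PHY reset,
 *	e.g. because management firmware is using the PHY.
 */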
11620 static bool
11621 wm_phy_resetisblocked(struct wm_softc *sc)
11622 {
11623 	bool blocked = false;
11624 	uint32_t reg;
11625 	int i = 0;
11626 
11627 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11628 		device_xname(sc->sc_dev), __func__));
11629 
11630 	switch (sc->sc_type) {
11631 	case WM_T_ICH8:
11632 	case WM_T_ICH9:
11633 	case WM_T_ICH10:
11634 	case WM_T_PCH:
11635 	case WM_T_PCH2:
11636 	case WM_T_PCH_LPT:
11637 	case WM_T_PCH_SPT:
11638 		do {
11639 			reg = CSR_READ(sc, WMREG_FWSM);
11640 			if ((reg & FWSM_RSPCIPHY) == 0) {
11641 				blocked = true;
11642 				delay(10*1000);
11643 				continue;
11644 			}
11645 			blocked = false;
11646 		} while (blocked && (i++ < 30));
11647 		return blocked;
11649 	case WM_T_82571:
11650 	case WM_T_82572:
11651 	case WM_T_82573:
11652 	case WM_T_82574:
11653 	case WM_T_82583:
11654 	case WM_T_80003:
11655 		reg = CSR_READ(sc, WMREG_MANC);
11656 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
11657 			return true;
11658 		else
11659 			return false;
11661 	default:
11662 		/* no problem */
11663 		break;
11664 	}
11665 
11666 	return false;
11667 }
11668 
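/*
 * wm_get_hw_control:
 *
 *	Set the DRV_LOAD bit to tell the firmware that the driver has
 *	taken control of the hardware.
 */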
11669 static void
11670 wm_get_hw_control(struct wm_softc *sc)
11671 {
11672 	uint32_t reg;
11673 
11674 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11675 		device_xname(sc->sc_dev), __func__));
11676 
11677 	switch (sc->sc_type) {
11678 	case WM_T_82573:
11679 		reg = CSR_READ(sc, WMREG_SWSM);
11680 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
11681 		break;
11682 	case WM_T_82571:
11683 	case WM_T_82572:
11684 	case WM_T_82574:
11685 	case WM_T_82583:
11686 	case WM_T_80003:
11687 	case WM_T_ICH8:
11688 	case WM_T_ICH9:
11689 	case WM_T_ICH10:
11690 	case WM_T_PCH:
11691 	case WM_T_PCH2:
11692 	case WM_T_PCH_LPT:
11693 	case WM_T_PCH_SPT:
11694 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11695 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
11696 		break;
11697 	default:
11698 		break;
11699 	}
11700 }
11701 
11702 static void
11703 wm_release_hw_control(struct wm_softc *sc)
11704 {
11705 	uint32_t reg;
11706 
11707 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11708 		device_xname(sc->sc_dev), __func__));
11709 
11710 	if ((sc->sc_flags & WM_F_HAS_MANAGE) == 0)
11711 		return;
11712 
11713 	if (sc->sc_type == WM_T_82573) {
11714 		reg = CSR_READ(sc, WMREG_SWSM);
		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
11717 	} else {
11718 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
11719 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
11720 	}
11721 }
11722 
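/*
 * wm_gate_hw_phy_config_ich8lan:
 *
 *	Gate or ungate automatic PHY configuration by hardware; only
 *	meaningful on PCH2 (82579) and newer.
 */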
11723 static void
11724 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
11725 {
11726 	uint32_t reg;
11727 
11728 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11729 		device_xname(sc->sc_dev), __func__));
11730 
11731 	if (sc->sc_type < WM_T_PCH2)
11732 		return;
11733 
11734 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11735 
11736 	if (gate)
11737 		reg |= EXTCNFCTR_GATE_PHY_CFG;
11738 	else
11739 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
11740 
11741 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11742 }
11743 
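/*
 * wm_smbustopci:
 *
 *	Switch the PHY from SMBus to PCIe mode by toggling the LANPHYPC
 *	value bit, used when the firmware is not managing the PHY
 *	(FWSM_FW_VALID is clear).
 */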
11744 static void
11745 wm_smbustopci(struct wm_softc *sc)
11746 {
11747 	uint32_t fwsm, reg;
11748 
11749 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11750 		device_xname(sc->sc_dev), __func__));
11751 
11752 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
11753 	wm_gate_hw_phy_config_ich8lan(sc, true);
11754 
11755 	/* Acquire PHY semaphore */
11756 	sc->phy.acquire(sc);
11757 
11758 	fwsm = CSR_READ(sc, WMREG_FWSM);
11759 	if (((fwsm & FWSM_FW_VALID) == 0)
11760 	    && ((wm_phy_resetisblocked(sc) == false))) {
11761 		if (sc->sc_type >= WM_T_PCH_LPT) {
11762 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
11763 			reg |= CTRL_EXT_FORCE_SMBUS;
11764 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11765 			CSR_WRITE_FLUSH(sc);
11766 			delay(50*1000);
11767 		}
11768 
11769 		/* Toggle LANPHYPC */
11770 		sc->sc_ctrl |= CTRL_LANPHYPC_OVERRIDE;
11771 		sc->sc_ctrl &= ~CTRL_LANPHYPC_VALUE;
11772 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11773 		CSR_WRITE_FLUSH(sc);
11774 		delay(1000);
11775 		sc->sc_ctrl &= ~CTRL_LANPHYPC_OVERRIDE;
11776 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11777 		CSR_WRITE_FLUSH(sc);
11778 		delay(50*1000);
11779 
11780 		if (sc->sc_type >= WM_T_PCH_LPT) {
11781 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
11782 			reg &= ~CTRL_EXT_FORCE_SMBUS;
11783 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11784 		}
11785 	}
11786 
11787 	/* Release semaphore */
11788 	sc->phy.release(sc);
11789 
11790 	/*
11791 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
11792 	 */
11793 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0))
11794 		wm_gate_hw_phy_config_ich8lan(sc, false);
11795 }
11796 
11797 static void
11798 wm_init_manageability(struct wm_softc *sc)
11799 {
11800 
11801 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11802 		device_xname(sc->sc_dev), __func__));
11803 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11804 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
11805 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11806 
11807 		/* Disable hardware interception of ARP */
11808 		manc &= ~MANC_ARP_EN;
11809 
11810 		/* Enable receiving management packets to the host */
11811 		if (sc->sc_type >= WM_T_82571) {
11812 			manc |= MANC_EN_MNG2HOST;
			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
11814 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
11815 		}
11816 
11817 		CSR_WRITE(sc, WMREG_MANC, manc);
11818 	}
11819 }
11820 
11821 static void
11822 wm_release_manageability(struct wm_softc *sc)
11823 {
11824 
11825 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
11826 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
11827 
11828 		manc |= MANC_ARP_EN;
11829 		if (sc->sc_type >= WM_T_82571)
11830 			manc &= ~MANC_EN_MNG2HOST;
11831 
11832 		CSR_WRITE(sc, WMREG_MANC, manc);
11833 	}
11834 }
11835 
11836 static void
11837 wm_get_wakeup(struct wm_softc *sc)
11838 {
11839 
11840 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
11841 	switch (sc->sc_type) {
11842 	case WM_T_82573:
11843 	case WM_T_82583:
11844 		sc->sc_flags |= WM_F_HAS_AMT;
11845 		/* FALLTHROUGH */
11846 	case WM_T_80003:
11847 	case WM_T_82541:
11848 	case WM_T_82547:
11849 	case WM_T_82571:
11850 	case WM_T_82572:
11851 	case WM_T_82574:
11852 	case WM_T_82575:
11853 	case WM_T_82576:
11854 	case WM_T_82580:
11855 	case WM_T_I350:
11856 	case WM_T_I354:
11857 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
11858 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
11859 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11860 		break;
11861 	case WM_T_ICH8:
11862 	case WM_T_ICH9:
11863 	case WM_T_ICH10:
11864 	case WM_T_PCH:
11865 	case WM_T_PCH2:
11866 	case WM_T_PCH_LPT:
11867 	case WM_T_PCH_SPT: /* XXX only Q170 chipset? */
11868 		sc->sc_flags |= WM_F_HAS_AMT;
11869 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
11870 		break;
11871 	default:
11872 		break;
11873 	}
11874 
11875 	/* 1: HAS_MANAGE */
11876 	if (wm_enable_mng_pass_thru(sc) != 0)
11877 		sc->sc_flags |= WM_F_HAS_MANAGE;
11878 
11879 #ifdef WM_DEBUG
11880 	printf("\n");
11881 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
11882 		printf("HAS_AMT,");
11883 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
11884 		printf("ARC_SUBSYS_VALID,");
11885 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
11886 		printf("ASF_FIRMWARE_PRES,");
11887 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
11888 		printf("HAS_MANAGE,");
11889 	printf("\n");
11890 #endif
11891 	/*
	 * Note that the WOL flag is set after the EEPROM reset code
	 * has run.
11894 	 */
11895 }
11896 
11897 #ifdef WM_WOL
11898 /* WOL in the newer chipset interfaces (pchlan) */
11899 static void
11900 wm_enable_phy_wakeup(struct wm_softc *sc)
11901 {
11902 #if 0
11903 	uint16_t preg;
11904 
11905 	/* Copy MAC RARs to PHY RARs */
11906 
11907 	/* Copy MAC MTA to PHY MTA */
11908 
11909 	/* Configure PHY Rx Control register */
11910 
11911 	/* Enable PHY wakeup in MAC register */
11912 
11913 	/* Configure and enable PHY wakeup in PHY registers */
11914 
11915 	/* Activate PHY wakeup */
11916 
11917 	/* XXX */
11918 #endif
11919 }
11920 
11921 /* Power down workaround on D3 */
11922 static void
11923 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
11924 {
11925 	uint32_t reg;
11926 	int i;
11927 
11928 	for (i = 0; i < 2; i++) {
11929 		/* Disable link */
11930 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11931 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
11932 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11933 
11934 		/*
11935 		 * Call gig speed drop workaround on Gig disable before
11936 		 * accessing any PHY registers
11937 		 */
11938 		if (sc->sc_type == WM_T_ICH8)
11939 			wm_gig_downshift_workaround_ich8lan(sc);
11940 
11941 		/* Write VR power-down enable */
11942 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11943 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11944 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
11945 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
11946 
11947 		/* Read it back and test */
11948 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
11949 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
11950 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
11951 			break;
11952 
11953 		/* Issue PHY reset and repeat at most one more time */
11954 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
11955 	}
11956 }
11957 
11958 static void
11959 wm_enable_wakeup(struct wm_softc *sc)
11960 {
11961 	uint32_t reg, pmreg;
11962 	pcireg_t pmode;
11963 
11964 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
11965 		device_xname(sc->sc_dev), __func__));
11966 
11967 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
11968 		&pmreg, NULL) == 0)
11969 		return;
11970 
11971 	/* Advertise the wakeup capability */
11972 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
11973 	    | CTRL_SWDPIN(3));
11974 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
11975 
11976 	/* ICH workaround */
11977 	switch (sc->sc_type) {
11978 	case WM_T_ICH8:
11979 	case WM_T_ICH9:
11980 	case WM_T_ICH10:
11981 	case WM_T_PCH:
11982 	case WM_T_PCH2:
11983 	case WM_T_PCH_LPT:
11984 	case WM_T_PCH_SPT:
11985 		/* Disable gig during WOL */
11986 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
11987 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
11988 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
11989 		if (sc->sc_type == WM_T_PCH)
11990 			wm_gmii_reset(sc);
11991 
11992 		/* Power down workaround */
11993 		if (sc->sc_phytype == WMPHY_82577) {
11994 			struct mii_softc *child;
11995 
11996 			/* Assume that the PHY is copper */
11997 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
11998 			if (child->mii_mpd_rev <= 2)
11999 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
12000 				    (768 << 5) | 25, 0x0444); /* magic num */
12001 		}
12002 		break;
12003 	default:
12004 		break;
12005 	}
12006 
12007 	/* Keep the laser running on fiber adapters */
12008 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
12009 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12010 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12011 		reg |= CTRL_EXT_SWDPIN(3);
12012 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12013 	}
12014 
12015 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
12016 #if 0	/* for the multicast packet */
12017 	reg |= WUFC_MC;
12018 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
12019 #endif
12020 
12021 	if (sc->sc_type == WM_T_PCH) {
12022 		wm_enable_phy_wakeup(sc);
12023 	} else {
12024 		CSR_WRITE(sc, WMREG_WUC, WUC_PME_EN);
12025 		CSR_WRITE(sc, WMREG_WUFC, reg);
12026 	}
12027 
12028 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12029 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12030 		|| (sc->sc_type == WM_T_PCH2))
12031 		    && (sc->sc_phytype == WMPHY_IGP_3))
12032 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
12033 
12034 	/* Request PME */
12035 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
12036 #if 0
12037 	/* Disable WOL */
12038 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
12039 #else
12040 	/* For WOL */
12041 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
12042 #endif
12043 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
12044 }
12045 #endif /* WM_WOL */
12046 
12047 /* LPLU */
12048 
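/*
 * Disable D0 Low Power Link Up (LPLU), which would otherwise make the
 * PHY negotiate the lowest possible link speed first to save power.
 */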
12049 static void
12050 wm_lplu_d0_disable(struct wm_softc *sc)
12051 {
12052 	uint32_t reg;
12053 
12054 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
12055 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
12056 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12057 }
12058 
12059 static void
12060 wm_lplu_d0_disable_pch(struct wm_softc *sc)
12061 {
12062 	uint32_t reg;
12063 
12064 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
12065 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
12066 	reg |= HV_OEM_BITS_ANEGNOW;
12067 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
12068 }
12069 
12070 /* EEE */
12071 
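/*
 * Enable or disable Energy Efficient Ethernet (IEEE 802.3az) on
 * I350 class devices according to the WM_F_EEE flag.
 */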
12072 static void
12073 wm_set_eee_i350(struct wm_softc *sc)
12074 {
12075 	uint32_t ipcnfg, eeer;
12076 
12077 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
12078 	eeer = CSR_READ(sc, WMREG_EEER);
12079 
12080 	if ((sc->sc_flags & WM_F_EEE) != 0) {
12081 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12082 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
12083 		    | EEER_LPI_FC);
12084 	} else {
12085 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12086 		ipcnfg &= ~IPCNFG_10BASE_TE;
12087 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
12088 		    | EEER_LPI_FC);
12089 	}
12090 
12091 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
12092 	CSR_WRITE(sc, WMREG_EEER, eeer);
12093 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
12094 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
12095 }
12096 
12097 /*
12098  * Workarounds (mainly PHY related).
 * Basically, PHY workarounds belong in the PHY drivers.
12100  */
12101 
12102 /* Work-around for 82566 Kumeran PCS lock loss */
12103 static void
12104 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
12105 {
12106 #if 0
12107 	int miistatus, active, i;
12108 	int reg;
12109 
12110 	miistatus = sc->sc_mii.mii_media_status;
12111 
12112 	/* If the link is not up, do nothing */
12113 	if ((miistatus & IFM_ACTIVE) == 0)
12114 		return;
12115 
12116 	active = sc->sc_mii.mii_media_active;
12117 
12118 	/* Nothing to do if the link is other than 1Gbps */
12119 	if (IFM_SUBTYPE(active) != IFM_1000_T)
12120 		return;
12121 
12122 	for (i = 0; i < 10; i++) {
12123 		/* read twice */
12124 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12125 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12126 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
12127 			goto out;	/* GOOD! */
12128 
12129 		/* Reset the PHY */
12130 		wm_gmii_reset(sc);
12131 		delay(5*1000);
12132 	}
12133 
12134 	/* Disable GigE link negotiation */
12135 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
12136 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12137 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12138 
12139 	/*
12140 	 * Call gig speed drop workaround on Gig disable before accessing
12141 	 * any PHY registers.
12142 	 */
12143 	wm_gig_downshift_workaround_ich8lan(sc);
12144 
12145 out:
12146 	return;
12147 #endif
12148 }
12149 
12150 /* WOL from S5 stops working */
12151 static void
12152 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
12153 {
12154 	uint16_t kmrn_reg;
12155 
12156 	/* Only for igp3 */
12157 	if (sc->sc_phytype == WMPHY_IGP_3) {
12158 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
12159 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
12160 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12161 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
12162 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12163 	}
12164 }
12165 
12166 /*
 * Workaround for PCH's PHYs
12168  * XXX should be moved to new PHY driver?
12169  */
12170 static void
12171 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
12172 {
12173 
12174 	KASSERT(sc->sc_type == WM_T_PCH);
12175 
12176 	if (sc->sc_phytype == WMPHY_82577)
12177 		wm_set_mdio_slow_mode_hv(sc);
12178 
12179 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
12180 
12181 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
12182 
12183 	/* 82578 */
12184 	if (sc->sc_phytype == WMPHY_82578) {
12185 		/* PCH rev. < 3 */
12186 		if (sc->sc_rev < 3) {
12187 			/* XXX 6 bit shift? Why? Is it page2? */
12188 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x29),
12189 			    0x66c0);
12190 			wm_gmii_hv_writereg(sc->sc_dev, 1, ((1 << 6) | 0x1e),
12191 			    0xffff);
12192 		}
12193 
12194 		/* XXX phy rev. < 2 */
12195 	}
12196 
12197 	/* Select page 0 */
12198 
12199 	sc->phy.acquire(sc);
12200 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
12201 	sc->phy.release(sc);
12202 
12203 	/*
12204 	 * Configure the K1 Si workaround during phy reset assuming there is
12205 	 * link so that it disables K1 if link is in 1Gbps.
12206 	 */
12207 	wm_k1_gig_workaround_hv(sc, 1);
12208 }
12209 
12210 static void
12211 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
12212 {
12213 
12214 	KASSERT(sc->sc_type == WM_T_PCH2);
12215 
12216 	wm_set_mdio_slow_mode_hv(sc);
12217 }
12218 
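/*
 * K1 Si workaround: K1 (a Kumeran power saving state) must be disabled
 * while the link is up at 1Gbps; otherwise the NVM's K1 setting is kept.
 */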
12219 static int
12220 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
12221 {
12222 	int k1_enable = sc->sc_nvm_k1_enabled;
12223 
12224 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12225 		device_xname(sc->sc_dev), __func__));
12226 
12227 	if (sc->phy.acquire(sc) != 0)
12228 		return -1;
12229 
12230 	if (link) {
12231 		k1_enable = 0;
12232 
12233 		/* Link stall fix for link up */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x0100);
12235 	} else {
12236 		/* Link stall fix for link down */
		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
		    0x4100);
12238 	}
12239 
12240 	wm_configure_k1_ich8lan(sc, k1_enable);
12241 	sc->phy.release(sc);
12242 
12243 	return 0;
12244 }
12245 
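/* Switch MDIO access to slow mode via the Kumeran mode control register */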
12246 static void
12247 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
12248 {
12249 	uint32_t reg;
12250 
12251 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
12252 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
12253 	    reg | HV_KMRN_MDIO_SLOW);
12254 }
12255 
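/*
 * Enable or disable the K1 power state in the Kumeran interface, briefly
 * forcing the MAC speed setting so that the change takes effect.
 */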
12256 static void
12257 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
12258 {
12259 	uint32_t ctrl, ctrl_ext, tmp;
12260 	uint16_t kmrn_reg;
12261 
12262 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
12263 
12264 	if (k1_enable)
12265 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
12266 	else
12267 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
12268 
12269 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
12270 
12271 	delay(20);
12272 
12273 	ctrl = CSR_READ(sc, WMREG_CTRL);
12274 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12275 
12276 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
12277 	tmp |= CTRL_FRCSPD;
12278 
12279 	CSR_WRITE(sc, WMREG_CTRL, tmp);
12280 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
12281 	CSR_WRITE_FLUSH(sc);
12282 	delay(20);
12283 
12284 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
12285 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12286 	CSR_WRITE_FLUSH(sc);
12287 	delay(20);
12288 }
12289 
12290 /* special case - for 82575 - need to do manual init ... */
12291 static void
12292 wm_reset_init_script_82575(struct wm_softc *sc)
12293 {
12294 	/*
	 * Remark: this is untested code - we have no board without EEPROM.
	 * Same setup as mentioned in the FreeBSD driver for the i82575.
12297 	 */
12298 
12299 	/* SerDes configuration via SERDESCTRL */
12300 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
12301 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
12302 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
12303 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
12304 
12305 	/* CCM configuration via CCMCTL register */
12306 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
12307 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
12308 
12309 	/* PCIe lanes configuration */
12310 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
12311 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
12312 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
12313 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
12314 
12315 	/* PCIe PLL Configuration */
12316 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
12317 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
12318 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12319 }
12320 
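/*
 * Restore the MDICNFG destination and shared-MDIO bits from the NVM;
 * only needed when the port is in SGMII mode.
 */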
12321 static void
12322 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12323 {
12324 	uint32_t reg;
12325 	uint16_t nvmword;
12326 	int rv;
12327 
12328 	if ((sc->sc_flags & WM_F_SGMII) == 0)
12329 		return;
12330 
12331 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12332 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12333 	if (rv != 0) {
12334 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12335 		    __func__);
12336 		return;
12337 	}
12338 
12339 	reg = CSR_READ(sc, WMREG_MDICNFG);
12340 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12341 		reg |= MDICNFG_DEST;
12342 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12343 		reg |= MDICNFG_COM_MDIO;
12344 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12345 }
12346 
12347 /*
12348  * I210 Errata 25 and I211 Errata 10
12349  * Slow System Clock.
12350  */
12351 static void
12352 wm_pll_workaround_i210(struct wm_softc *sc)
12353 {
12354 	uint32_t mdicnfg, wuc;
12355 	uint32_t reg;
12356 	pcireg_t pcireg;
12357 	uint32_t pmreg;
12358 	uint16_t nvmword, tmp_nvmword;
12359 	int phyval;
12360 	bool wa_done = false;
12361 	int i;
12362 
12363 	/* Save WUC and MDICNFG registers */
12364 	wuc = CSR_READ(sc, WMREG_WUC);
12365 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
12366 
12367 	reg = mdicnfg & ~MDICNFG_DEST;
12368 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12369 
12370 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
12371 		nvmword = INVM_DEFAULT_AL;
12372 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
12373 
12374 	/* Get Power Management cap offset */
12375 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12376 		&pmreg, NULL) == 0)
12377 		return;
12378 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
12379 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
12380 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
12381 
12382 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
12383 			break; /* OK */
12384 		}
12385 
12386 		wa_done = true;
12387 		/* Directly reset the internal PHY */
12388 		reg = CSR_READ(sc, WMREG_CTRL);
12389 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
12390 
12391 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12392 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
12393 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12394 
12395 		CSR_WRITE(sc, WMREG_WUC, 0);
12396 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
12397 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12398 
12399 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
12400 		    pmreg + PCI_PMCSR);
12401 		pcireg |= PCI_PMCSR_STATE_D3;
12402 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12403 		    pmreg + PCI_PMCSR, pcireg);
12404 		delay(1000);
12405 		pcireg &= ~PCI_PMCSR_STATE_D3;
12406 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
12407 		    pmreg + PCI_PMCSR, pcireg);
12408 
12409 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
12410 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
12411 
12412 		/* Restore WUC register */
12413 		CSR_WRITE(sc, WMREG_WUC, wuc);
12414 	}
12415 
12416 	/* Restore MDICNFG setting */
12417 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
12418 	if (wa_done)
12419 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
12420 }
12421