xref: /netbsd-src/sys/dev/pci/if_wm.c (revision c34236556bea94afcaca1782d7d228301edc3ea0)
1 /*	$NetBSD: if_wm.c,v 1.462 2017/01/06 08:05:26 msaitoh Exp $	*/
2 
3 /*
4  * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*******************************************************************************
39 
40   Copyright (c) 2001-2005, Intel Corporation
41   All rights reserved.
42 
43   Redistribution and use in source and binary forms, with or without
44   modification, are permitted provided that the following conditions are met:
45 
46    1. Redistributions of source code must retain the above copyright notice,
47       this list of conditions and the following disclaimer.
48 
49    2. Redistributions in binary form must reproduce the above copyright
50       notice, this list of conditions and the following disclaimer in the
51       documentation and/or other materials provided with the distribution.
52 
53    3. Neither the name of the Intel Corporation nor the names of its
54       contributors may be used to endorse or promote products derived from
55       this software without specific prior written permission.
56 
57   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
58   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
59   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
60   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
61   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
62   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
63   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
64   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
66   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
67   POSSIBILITY OF SUCH DAMAGE.
68 
69 *******************************************************************************/
70 /*
71  * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
72  *
73  * TODO (in order of importance):
74  *
75  *	- Check XXX'ed comments
76  *	- Disable D0 LPLU on 8257[12356], 82580 and I350.
77  *	- TX Multi queue improvement (refine queue selection logic)
78  *	- Advanced Receive Descriptor
79  *	- EEE (Energy Efficiency Ethernet)
80  *	- Virtual Function
81  *	- Set LED correctly (based on contents in EEPROM)
82  *	- Rework how parameters are loaded from the EEPROM.
83  *	- Image Unique ID
84  */
85 
86 #include <sys/cdefs.h>
87 __KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.462 2017/01/06 08:05:26 msaitoh Exp $");
88 
89 #ifdef _KERNEL_OPT
90 #include "opt_net_mpsafe.h"
91 #endif
92 
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/callout.h>
96 #include <sys/mbuf.h>
97 #include <sys/malloc.h>
98 #include <sys/kmem.h>
99 #include <sys/kernel.h>
100 #include <sys/socket.h>
101 #include <sys/ioctl.h>
102 #include <sys/errno.h>
103 #include <sys/device.h>
104 #include <sys/queue.h>
105 #include <sys/syslog.h>
106 #include <sys/interrupt.h>
107 #include <sys/cpu.h>
108 #include <sys/pcq.h>
109 
110 #include <sys/rndsource.h>
111 
112 #include <net/if.h>
113 #include <net/if_dl.h>
114 #include <net/if_media.h>
115 #include <net/if_ether.h>
116 
117 #include <net/bpf.h>
118 
119 #include <netinet/in.h>			/* XXX for struct ip */
120 #include <netinet/in_systm.h>		/* XXX for struct ip */
121 #include <netinet/ip.h>			/* XXX for struct ip */
122 #include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
123 #include <netinet/tcp.h>		/* XXX for struct tcphdr */
124 
125 #include <sys/bus.h>
126 #include <sys/intr.h>
127 #include <machine/endian.h>
128 
129 #include <dev/mii/mii.h>
130 #include <dev/mii/miivar.h>
131 #include <dev/mii/miidevs.h>
132 #include <dev/mii/mii_bitbang.h>
133 #include <dev/mii/ikphyreg.h>
134 #include <dev/mii/igphyreg.h>
135 #include <dev/mii/igphyvar.h>
136 #include <dev/mii/inbmphyreg.h>
137 
138 #include <dev/pci/pcireg.h>
139 #include <dev/pci/pcivar.h>
140 #include <dev/pci/pcidevs.h>
141 
142 #include <dev/pci/if_wmreg.h>
143 #include <dev/pci/if_wmvar.h>
144 
145 #ifdef WM_DEBUG
146 #define	WM_DEBUG_LINK		__BIT(0)
147 #define	WM_DEBUG_TX		__BIT(1)
148 #define	WM_DEBUG_RX		__BIT(2)
149 #define	WM_DEBUG_GMII		__BIT(3)
150 #define	WM_DEBUG_MANAGE		__BIT(4)
151 #define	WM_DEBUG_NVM		__BIT(5)
152 #define	WM_DEBUG_INIT		__BIT(6)
153 #define	WM_DEBUG_LOCK		__BIT(7)
154 int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
155     | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;
156 
157 #define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
158 #else
159 #define	DPRINTF(x, y)	/* nothing */
160 #endif /* WM_DEBUG */
161 
162 #ifdef NET_MPSAFE
163 #define WM_MPSAFE	1
164 #endif
165 
166 /*
167  * This device driver's max interrupt numbers.
168  */
169 #define WM_MAX_NQUEUEINTR	16
170 #define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)
171 
172 /*
173  * Transmit descriptor list size.  Due to errata, we can only have
174  * 256 hardware descriptors in the ring on < 82544, but we use 4096
175  * on >= 82544.  We tell the upper layers that they can queue a lot
176  * of packets, and we go ahead and manage up to 64 (16 for the i82547)
177  * of them at a time.
178  *
179  * We allow up to 256 (!) DMA segments per packet.  Pathological packet
180  * chains containing many small mbufs have been observed in zero-copy
181  * situations with jumbo frames.
182  */
183 #define	WM_NTXSEGS		256
184 #define	WM_IFQUEUELEN		256
185 #define	WM_TXQUEUELEN_MAX	64
186 #define	WM_TXQUEUELEN_MAX_82547	16
187 #define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
188 #define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
189 #define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
190 #define	WM_NTXDESC_82542	256
191 #define	WM_NTXDESC_82544	4096
192 #define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
193 #define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
194 #define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
195 #define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
196 #define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
197 
198 #define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */
199 
200 #define	WM_TXINTERQSIZE		256
201 
202 /*
203  * Receive descriptor list size.  We have one Rx buffer for normal
204  * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
205  * packet.  We allocate 256 receive descriptors, each with a 2k
206  * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
207  */
208 #define	WM_NRXDESC		256
209 #define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
210 #define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
211 #define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)
212 
213 typedef union txdescs {
214 	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
215 	nq_txdesc_t      sctxu_nq_txdescs[WM_NTXDESC_82544];
216 } txdescs_t;
217 
218 #define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
219 #define	WM_CDRXOFF(x)	(sizeof(wiseman_rxdesc_t) * x)
220 
221 /*
222  * Software state for transmit jobs.
223  */
224 struct wm_txsoft {
225 	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
226 	bus_dmamap_t txs_dmamap;	/* our DMA map */
227 	int txs_firstdesc;		/* first descriptor in packet */
228 	int txs_lastdesc;		/* last descriptor in packet */
229 	int txs_ndesc;			/* # of descriptors used */
230 };
231 
232 /*
233  * Software state for receive buffers.  Each descriptor gets a
234  * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
235  * more than one buffer, we chain them together.
236  */
237 struct wm_rxsoft {
238 	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
239 	bus_dmamap_t rxs_dmamap;	/* our DMA map */
240 };
241 
242 #define WM_LINKUP_TIMEOUT	50
243 
244 static uint16_t swfwphysem[] = {
245 	SWFW_PHY0_SM,
246 	SWFW_PHY1_SM,
247 	SWFW_PHY2_SM,
248 	SWFW_PHY3_SM
249 };
250 
251 static const uint32_t wm_82580_rxpbs_table[] = {
252 	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
253 };
254 
255 struct wm_softc;
256 
257 #ifdef WM_EVENT_COUNTERS
258 #define WM_Q_EVCNT_DEFINE(qname, evname)				\
259 	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
260 	struct evcnt qname##_ev_##evname;
261 
262 #define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
263 	do{								\
264 		snprintf((q)->qname##_##evname##_evcnt_name,		\
265 		    sizeof((q)->qname##_##evname##_evcnt_name),		\
266 		    "%s%02d%s", #qname, (qnum), #evname);		\
267 		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
268 		    (evtype), NULL, (xname),				\
269 		    (q)->qname##_##evname##_evcnt_name);		\
270 	}while(0)
271 
272 #define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
273 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)
274 
275 #define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
276 	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)
277 #endif /* WM_EVENT_COUNTERS */
278 
279 struct wm_txqueue {
280 	kmutex_t *txq_lock;		/* lock for tx operations */
281 
282 	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */
283 
284 	/* Software state for the transmit descriptors. */
285 	int txq_num;			/* must be a power of two */
286 	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];
287 
288 	/* TX control data structures. */
289 	int txq_ndesc;			/* must be a power of two */
290 	size_t txq_descsize;		/* a tx descriptor size */
291 	txdescs_t *txq_descs_u;
292         bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
293 	bus_dma_segment_t txq_desc_seg;	/* control data segment */
294 	int txq_desc_rseg;		/* real number of control segment */
295 #define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
296 #define	txq_descs	txq_descs_u->sctxu_txdescs
297 #define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs
298 
299 	bus_addr_t txq_tdt_reg;		/* offset of TDT register */
300 
301 	int txq_free;			/* number of free Tx descriptors */
302 	int txq_next;			/* next ready Tx descriptor */
303 
304 	int txq_sfree;			/* number of free Tx jobs */
305 	int txq_snext;			/* next free Tx job */
306 	int txq_sdirty;			/* dirty Tx jobs */
307 
308 	/* These 4 variables are used only on the 82547. */
309 	int txq_fifo_size;		/* Tx FIFO size */
310 	int txq_fifo_head;		/* current head of FIFO */
311 	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
312 	int txq_fifo_stall;		/* Tx FIFO is stalled */
313 
314 	/*
315 	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
316 	 * CPUs. This queue intermediate them without block.
317 	 */
318 	pcq_t *txq_interq;
319 
320 	/*
321 	 * NEWQUEUE devices must use not ifp->if_flags but txq->txq_flags
322 	 * to manage Tx H/W queue's busy flag.
323 	 */
324 	int txq_flags;			/* flags for H/W queue, see below */
325 #define	WM_TXQ_NO_SPACE	0x1
326 
327 	bool txq_stopping;
328 
329 #ifdef WM_EVENT_COUNTERS
330 	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
331 	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
332 	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
333 	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
334 	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
335 						/* XXX not used? */
336 
337 	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
338 	WM_Q_EVCNT_DEFINE(txq,txtusum)		/* TCP/UDP cksums comp. out-bound */
339 	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
340 	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
341 	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
342 	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */
343 
344 	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped(too many segs) */
345 
346 	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */
347 
348 	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
349 	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
350 #endif /* WM_EVENT_COUNTERS */
351 };
352 
353 struct wm_rxqueue {
354 	kmutex_t *rxq_lock;		/* lock for rx operations */
355 
356 	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */
357 
358 	/* Software state for the receive descriptors. */
359 	wiseman_rxdesc_t *rxq_descs;
360 
361 	/* RX control data structures. */
362 	struct wm_rxsoft rxq_soft[WM_NRXDESC];
363 	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
364 	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
365 	int rxq_desc_rseg;		/* real number of control segment */
366 	size_t rxq_desc_size;		/* control data size */
367 #define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
368 
369 	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */
370 
371 	int rxq_ptr;			/* next ready Rx desc/queue ent */
372 	int rxq_discard;
373 	int rxq_len;
374 	struct mbuf *rxq_head;
375 	struct mbuf *rxq_tail;
376 	struct mbuf **rxq_tailp;
377 
378 	bool rxq_stopping;
379 
380 #ifdef WM_EVENT_COUNTERS
381 	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
382 
383 	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
384 	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
385 #endif
386 };
387 
388 struct wm_queue {
389 	int wmq_id;			/* index of transmit and receive queues */
390 	int wmq_intr_idx;		/* index of MSI-X tables */
391 
392 	struct wm_txqueue wmq_txq;
393 	struct wm_rxqueue wmq_rxq;
394 };
395 
396 struct wm_phyop {
397 	int (*acquire)(struct wm_softc *);
398 	void (*release)(struct wm_softc *);
399 	int reset_delay_us;
400 };
401 
402 /*
403  * Software state per device.
404  */
405 struct wm_softc {
406 	device_t sc_dev;		/* generic device information */
407 	bus_space_tag_t sc_st;		/* bus space tag */
408 	bus_space_handle_t sc_sh;	/* bus space handle */
409 	bus_size_t sc_ss;		/* bus space size */
410 	bus_space_tag_t sc_iot;		/* I/O space tag */
411 	bus_space_handle_t sc_ioh;	/* I/O space handle */
412 	bus_size_t sc_ios;		/* I/O space size */
413 	bus_space_tag_t sc_flasht;	/* flash registers space tag */
414 	bus_space_handle_t sc_flashh;	/* flash registers space handle */
415 	bus_size_t sc_flashs;		/* flash registers space size */
416 	off_t sc_flashreg_offset;	/*
417 					 * offset to flash registers from
418 					 * start of BAR
419 					 */
420 	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
421 
422 	struct ethercom sc_ethercom;	/* ethernet common data */
423 	struct mii_data sc_mii;		/* MII/media information */
424 
425 	pci_chipset_tag_t sc_pc;
426 	pcitag_t sc_pcitag;
427 	int sc_bus_speed;		/* PCI/PCIX bus speed */
428 	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */
429 
430 	uint16_t sc_pcidevid;		/* PCI device ID */
431 	wm_chip_type sc_type;		/* MAC type */
432 	int sc_rev;			/* MAC revision */
433 	wm_phy_type sc_phytype;		/* PHY type */
434 	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES)*/
435 #define	WM_MEDIATYPE_UNKNOWN		0x00
436 #define	WM_MEDIATYPE_FIBER		0x01
437 #define	WM_MEDIATYPE_COPPER		0x02
438 #define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
439 	int sc_funcid;			/* unit number of the chip (0 to 3) */
440 	int sc_flags;			/* flags; see below */
441 	int sc_if_flags;		/* last if_flags */
442 	int sc_flowflags;		/* 802.3x flow control flags */
443 	int sc_align_tweak;
444 
445 	void *sc_ihs[WM_MAX_NINTR];	/*
446 					 * interrupt cookie.
447 					 * legacy and msi use sc_ihs[0].
448 					 */
449 	pci_intr_handle_t *sc_intrs;	/* legacy and msi use sc_intrs[0] */
450 	int sc_nintrs;			/* number of interrupts */
451 
452 	int sc_link_intr_idx;		/* index of MSI-X tables */
453 
454 	callout_t sc_tick_ch;		/* tick callout */
455 	bool sc_core_stopping;
456 
457 	int sc_nvm_ver_major;
458 	int sc_nvm_ver_minor;
459 	int sc_nvm_ver_build;
460 	int sc_nvm_addrbits;		/* NVM address bits */
461 	unsigned int sc_nvm_wordsize;	/* NVM word size */
462 	int sc_ich8_flash_base;
463 	int sc_ich8_flash_bank_size;
464 	int sc_nvm_k1_enabled;
465 
466 	int sc_nqueues;
467 	struct wm_queue *sc_queue;
468 
469 	int sc_affinity_offset;
470 
471 #ifdef WM_EVENT_COUNTERS
472 	/* Event counters. */
473 	struct evcnt sc_ev_linkintr;	/* Link interrupts */
474 
475         /* WM_T_82542_2_1 only */
476 	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
477 	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
478 	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
479 	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
480 	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
481 #endif /* WM_EVENT_COUNTERS */
482 
483 	/* This variable are used only on the 82547. */
484 	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */
485 
486 	uint32_t sc_ctrl;		/* prototype CTRL register */
487 #if 0
488 	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
489 #endif
490 	uint32_t sc_icr;		/* prototype interrupt bits */
491 	uint32_t sc_itr;		/* prototype intr throttling reg */
492 	uint32_t sc_tctl;		/* prototype TCTL register */
493 	uint32_t sc_rctl;		/* prototype RCTL register */
494 	uint32_t sc_txcw;		/* prototype TXCW register */
495 	uint32_t sc_tipg;		/* prototype TIPG register */
496 	uint32_t sc_fcrtl;		/* prototype FCRTL register */
497 	uint32_t sc_pba;		/* prototype PBA register */
498 
499 	int sc_tbi_linkup;		/* TBI link status */
500 	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
501 	int sc_tbi_serdes_ticks;	/* tbi ticks */
502 
503 	int sc_mchash_type;		/* multicast filter offset */
504 
505 	krndsource_t rnd_source;	/* random source */
506 
507 	struct if_percpuq *sc_ipq;	/* softint-based input queues */
508 
509 	kmutex_t *sc_core_lock;		/* lock for softc operations */
510 	kmutex_t *sc_ich_phymtx;	/*
511 					 * 82574/82583/ICH/PCH specific PHY
512 					 * mutex. For 82574/82583, the mutex
513 					 * is used for both PHY and NVM.
514 					 */
515 	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */
516 
517 	struct wm_phyop phy;
518 };
519 
520 #define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
521 #define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
522 #define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))
523 
524 #ifdef WM_MPSAFE
525 #define CALLOUT_FLAGS	CALLOUT_MPSAFE
526 #else
527 #define CALLOUT_FLAGS	0
528 #endif
529 
530 #define	WM_RXCHAIN_RESET(rxq)						\
531 do {									\
532 	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
533 	*(rxq)->rxq_tailp = NULL;					\
534 	(rxq)->rxq_len = 0;						\
535 } while (/*CONSTCOND*/0)
536 
537 #define	WM_RXCHAIN_LINK(rxq, m)						\
538 do {									\
539 	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
540 	(rxq)->rxq_tailp = &(m)->m_next;				\
541 } while (/*CONSTCOND*/0)
542 
543 #ifdef WM_EVENT_COUNTERS
544 #define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
545 #define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)
546 
547 #define WM_Q_EVCNT_INCR(qname, evname)			\
548 	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
549 #define WM_Q_EVCNT_ADD(qname, evname, val)		\
550 	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
551 #else /* !WM_EVENT_COUNTERS */
552 #define	WM_EVCNT_INCR(ev)	/* nothing */
553 #define	WM_EVCNT_ADD(ev, val)	/* nothing */
554 
555 #define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
556 #define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
557 #endif /* !WM_EVENT_COUNTERS */
558 
559 #define	CSR_READ(sc, reg)						\
560 	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
561 #define	CSR_WRITE(sc, reg, val)						\
562 	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
563 #define	CSR_WRITE_FLUSH(sc)						\
564 	(void) CSR_READ((sc), WMREG_STATUS)
565 
566 #define ICH8_FLASH_READ32(sc, reg)					\
567 	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
568 	    (reg) + sc->sc_flashreg_offset)
569 #define ICH8_FLASH_WRITE32(sc, reg, data)				\
570 	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
571 	    (reg) + sc->sc_flashreg_offset, (data))
572 
573 #define ICH8_FLASH_READ16(sc, reg)					\
574 	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
575 	    (reg) + sc->sc_flashreg_offset)
576 #define ICH8_FLASH_WRITE16(sc, reg, data)				\
577 	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
578 	    (reg) + sc->sc_flashreg_offset, (data))
579 
580 #define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
581 #define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((x)))
582 
583 #define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
584 #define	WM_CDTXADDR_HI(txq, x)						\
585 	(sizeof(bus_addr_t) == 8 ?					\
586 	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)
587 
588 #define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
589 #define	WM_CDRXADDR_HI(rxq, x)						\
590 	(sizeof(bus_addr_t) == 8 ?					\
591 	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
592 
593 /*
594  * Register read/write functions.
595  * Other than CSR_{READ|WRITE}().
596  */
597 #if 0
598 static inline uint32_t wm_io_read(struct wm_softc *, int);
599 #endif
600 static inline void wm_io_write(struct wm_softc *, int, uint32_t);
601 static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
602 	uint32_t, uint32_t);
603 static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);
604 
605 /*
606  * Descriptor sync/init functions.
607  */
608 static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
609 static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
610 static inline void wm_init_rxdesc(struct wm_rxqueue *, int);
611 
612 /*
613  * Device driver interface functions and commonly used functions.
614  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
615  */
616 static const struct wm_product *wm_lookup(const struct pci_attach_args *);
617 static int	wm_match(device_t, cfdata_t, void *);
618 static void	wm_attach(device_t, device_t, void *);
619 static int	wm_detach(device_t, int);
620 static bool	wm_suspend(device_t, const pmf_qual_t *);
621 static bool	wm_resume(device_t, const pmf_qual_t *);
622 static void	wm_watchdog(struct ifnet *);
623 static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *);
624 static void	wm_tick(void *);
625 static int	wm_ifflags_cb(struct ethercom *);
626 static int	wm_ioctl(struct ifnet *, u_long, void *);
627 /* MAC address related */
628 static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
629 static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
630 static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
631 static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
632 static void	wm_set_filter(struct wm_softc *);
633 /* Reset and init related */
634 static void	wm_set_vlan(struct wm_softc *);
635 static void	wm_set_pcie_completion_timeout(struct wm_softc *);
636 static void	wm_get_auto_rd_done(struct wm_softc *);
637 static void	wm_lan_init_done(struct wm_softc *);
638 static void	wm_get_cfg_done(struct wm_softc *);
639 static void	wm_initialize_hardware_bits(struct wm_softc *);
640 static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
641 static void	wm_reset_phy(struct wm_softc *);
642 static void	wm_flush_desc_rings(struct wm_softc *);
643 static void	wm_reset(struct wm_softc *);
644 static int	wm_add_rxbuf(struct wm_rxqueue *, int);
645 static void	wm_rxdrain(struct wm_rxqueue *);
646 static void	wm_rss_getkey(uint8_t *);
647 static void	wm_init_rss(struct wm_softc *);
648 static void	wm_adjust_qnum(struct wm_softc *, int);
649 static int	wm_setup_legacy(struct wm_softc *);
650 static int	wm_setup_msix(struct wm_softc *);
651 static int	wm_init(struct ifnet *);
652 static int	wm_init_locked(struct ifnet *);
653 static void	wm_turnon(struct wm_softc *);
654 static void	wm_turnoff(struct wm_softc *);
655 static void	wm_stop(struct ifnet *, int);
656 static void	wm_stop_locked(struct ifnet *, int);
657 static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
658 static void	wm_82547_txfifo_stall(void *);
659 static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
660 /* DMA related */
661 static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
662 static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
663 static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
664 static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
665     struct wm_txqueue *);
666 static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
667 static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
668 static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
669     struct wm_rxqueue *);
670 static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
671 static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
672 static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
673 static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
674 static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
675 static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
676 static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
677     struct wm_txqueue *);
678 static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
679     struct wm_rxqueue *);
680 static int	wm_alloc_txrx_queues(struct wm_softc *);
681 static void	wm_free_txrx_queues(struct wm_softc *);
682 static int	wm_init_txrx_queues(struct wm_softc *);
683 /* Start */
684 static int	wm_tx_offload(struct wm_softc *, struct wm_txsoft *,
685     uint32_t *, uint8_t *);
686 static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
687 static void	wm_start(struct ifnet *);
688 static void	wm_start_locked(struct ifnet *);
689 static int	wm_transmit(struct ifnet *, struct mbuf *);
690 static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
691 static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
692 static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
693     struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
694 static void	wm_nq_start(struct ifnet *);
695 static void	wm_nq_start_locked(struct ifnet *);
696 static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
697 static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
698 static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *, bool);
699 static void	wm_deferred_start(struct ifnet *);
700 /* Interrupt */
701 static int	wm_txeof(struct wm_softc *, struct wm_txqueue *);
702 static void	wm_rxeof(struct wm_rxqueue *);
703 static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
704 static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
705 static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
706 static void	wm_linkintr(struct wm_softc *, uint32_t);
707 static int	wm_intr_legacy(void *);
708 static int	wm_txrxintr_msix(void *);
709 static int	wm_linkintr_msix(void *);
710 
711 /*
712  * Media related.
713  * GMII, SGMII, TBI, SERDES and SFP.
714  */
715 /* Common */
716 static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
717 /* GMII related */
718 static void	wm_gmii_reset(struct wm_softc *);
719 static int	wm_get_phy_id_82575(struct wm_softc *);
720 static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
721 static int	wm_gmii_mediachange(struct ifnet *);
722 static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
723 static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
724 static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
725 static int	wm_gmii_i82543_readreg(device_t, int, int);
726 static void	wm_gmii_i82543_writereg(device_t, int, int, int);
727 static int	wm_gmii_mdic_readreg(device_t, int, int);
728 static void	wm_gmii_mdic_writereg(device_t, int, int, int);
729 static int	wm_gmii_i82544_readreg(device_t, int, int);
730 static void	wm_gmii_i82544_writereg(device_t, int, int, int);
731 static int	wm_gmii_i80003_readreg(device_t, int, int);
732 static void	wm_gmii_i80003_writereg(device_t, int, int, int);
733 static int	wm_gmii_bm_readreg(device_t, int, int);
734 static void	wm_gmii_bm_writereg(device_t, int, int, int);
735 static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
736 static int	wm_gmii_hv_readreg(device_t, int, int);
737 static int	wm_gmii_hv_readreg_locked(device_t, int, int);
738 static void	wm_gmii_hv_writereg(device_t, int, int, int);
739 static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
740 static int	wm_gmii_82580_readreg(device_t, int, int);
741 static void	wm_gmii_82580_writereg(device_t, int, int, int);
742 static int	wm_gmii_gs40g_readreg(device_t, int, int);
743 static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
744 static void	wm_gmii_statchg(struct ifnet *);
745 /*
746  * kumeran related (80003, ICH* and PCH*).
747  * These functions are not for accessing MII registers but for accessing
748  * kumeran specific registers.
749  */
750 static int	wm_kmrn_readreg(struct wm_softc *, int);
751 static int	wm_kmrn_readreg_locked(struct wm_softc *, int);
752 static void	wm_kmrn_writereg(struct wm_softc *, int, int);
753 static void	wm_kmrn_writereg_locked(struct wm_softc *, int, int);
754 /* SGMII */
755 static bool	wm_sgmii_uses_mdio(struct wm_softc *);
756 static int	wm_sgmii_readreg(device_t, int, int);
757 static void	wm_sgmii_writereg(device_t, int, int, int);
758 /* TBI related */
759 static void	wm_tbi_mediainit(struct wm_softc *);
760 static int	wm_tbi_mediachange(struct ifnet *);
761 static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
762 static int	wm_check_for_link(struct wm_softc *);
763 static void	wm_tbi_tick(struct wm_softc *);
764 /* SERDES related */
765 static void	wm_serdes_power_up_link_82575(struct wm_softc *);
766 static int	wm_serdes_mediachange(struct ifnet *);
767 static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
768 static void	wm_serdes_tick(struct wm_softc *);
769 /* SFP related */
770 static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
771 static uint32_t	wm_sfp_get_media_type(struct wm_softc *);
772 
773 /*
774  * NVM related.
775  * Microwire, SPI (w/wo EERD) and Flash.
776  */
777 /* Misc functions */
778 static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
779 static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
780 static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
781 /* Microwire */
782 static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
783 /* SPI */
784 static int	wm_nvm_ready_spi(struct wm_softc *);
785 static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
786 /* Using with EERD */
787 static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
788 static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
789 /* Flash */
790 static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
791     unsigned int *);
792 static int32_t	wm_ich8_cycle_init(struct wm_softc *);
793 static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
794 static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
795 	uint32_t *);
796 static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
797 static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
798 static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
799 static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
800 static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
801 /* iNVM */
802 static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
803 static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
804 /* Lock, detecting NVM type, validate checksum and read */
805 static int	wm_nvm_acquire(struct wm_softc *);
806 static void	wm_nvm_release(struct wm_softc *);
807 static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
808 static int	wm_nvm_get_flash_presence_i210(struct wm_softc *);
809 static int	wm_nvm_validate_checksum(struct wm_softc *);
810 static void	wm_nvm_version_invm(struct wm_softc *);
811 static void	wm_nvm_version(struct wm_softc *);
812 static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);
813 
814 /*
815  * Hardware semaphores.
816  * Very complexed...
817  */
818 static int	wm_get_null(struct wm_softc *);
819 static void	wm_put_null(struct wm_softc *);
820 static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
821 static void	wm_put_swsm_semaphore(struct wm_softc *);
822 static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
823 static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
824 static int	wm_get_phy_82575(struct wm_softc *);
825 static void	wm_put_phy_82575(struct wm_softc *);
826 static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
827 static void	wm_put_swfwhw_semaphore(struct wm_softc *);
828 static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
829 static void	wm_put_swflag_ich8lan(struct wm_softc *);
830 static int	wm_get_nvm_ich8lan(struct wm_softc *);		/* For NVM */
831 static void	wm_put_nvm_ich8lan(struct wm_softc *);
832 static int	wm_get_hw_semaphore_82573(struct wm_softc *);
833 static void	wm_put_hw_semaphore_82573(struct wm_softc *);
834 
835 /*
836  * Management mode and power management related subroutines.
837  * BMC, AMT, suspend/resume and EEE.
838  */
839 #if 0
840 static int	wm_check_mng_mode(struct wm_softc *);
841 static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
842 static int	wm_check_mng_mode_82574(struct wm_softc *);
843 static int	wm_check_mng_mode_generic(struct wm_softc *);
844 #endif
845 static int	wm_enable_mng_pass_thru(struct wm_softc *);
846 static bool	wm_phy_resetisblocked(struct wm_softc *);
847 static void	wm_get_hw_control(struct wm_softc *);
848 static void	wm_release_hw_control(struct wm_softc *);
849 static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
850 static void	wm_smbustopci(struct wm_softc *);
851 static void	wm_init_manageability(struct wm_softc *);
852 static void	wm_release_manageability(struct wm_softc *);
853 static void	wm_get_wakeup(struct wm_softc *);
854 static void	wm_ulp_disable(struct wm_softc *);
855 static void	wm_enable_phy_wakeup(struct wm_softc *);
856 static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
857 static void	wm_enable_wakeup(struct wm_softc *);
858 /* LPLU (Low Power Link Up) */
859 static void	wm_lplu_d0_disable(struct wm_softc *);
860 static void	wm_lplu_d0_disable_pch(struct wm_softc *);
861 /* EEE */
862 static void	wm_set_eee_i350(struct wm_softc *);
863 
864 /*
865  * Workarounds (mainly PHY related).
866  * Basically, PHY's workarounds are in the PHY drivers.
867  */
868 static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
869 static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
870 static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
871 static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
872 static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
873 static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
874 static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
875 static void	wm_reset_init_script_82575(struct wm_softc *);
876 static void	wm_reset_mdicnfg_82580(struct wm_softc *);
877 static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
878 static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
879 static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
880 static void	wm_pll_workaround_i210(struct wm_softc *);
881 
882 CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
883     wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
884 
885 /*
886  * Devices supported by this driver.
887  */
888 static const struct wm_product {
889 	pci_vendor_id_t		wmp_vendor;
890 	pci_product_id_t	wmp_product;
891 	const char		*wmp_name;
892 	wm_chip_type		wmp_type;
893 	uint32_t		wmp_flags;
894 #define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
895 #define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
896 #define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
897 #define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
898 #define WMP_MEDIATYPE(x)	((x) & 0x03)
899 } wm_products[] = {
900 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
901 	  "Intel i82542 1000BASE-X Ethernet",
902 	  WM_T_82542_2_1,	WMP_F_FIBER },
903 
904 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
905 	  "Intel i82543GC 1000BASE-X Ethernet",
906 	  WM_T_82543,		WMP_F_FIBER },
907 
908 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
909 	  "Intel i82543GC 1000BASE-T Ethernet",
910 	  WM_T_82543,		WMP_F_COPPER },
911 
912 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
913 	  "Intel i82544EI 1000BASE-T Ethernet",
914 	  WM_T_82544,		WMP_F_COPPER },
915 
916 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
917 	  "Intel i82544EI 1000BASE-X Ethernet",
918 	  WM_T_82544,		WMP_F_FIBER },
919 
920 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
921 	  "Intel i82544GC 1000BASE-T Ethernet",
922 	  WM_T_82544,		WMP_F_COPPER },
923 
924 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
925 	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
926 	  WM_T_82544,		WMP_F_COPPER },
927 
928 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
929 	  "Intel i82540EM 1000BASE-T Ethernet",
930 	  WM_T_82540,		WMP_F_COPPER },
931 
932 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
933 	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
934 	  WM_T_82540,		WMP_F_COPPER },
935 
936 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
937 	  "Intel i82540EP 1000BASE-T Ethernet",
938 	  WM_T_82540,		WMP_F_COPPER },
939 
940 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
941 	  "Intel i82540EP 1000BASE-T Ethernet",
942 	  WM_T_82540,		WMP_F_COPPER },
943 
944 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
945 	  "Intel i82540EP 1000BASE-T Ethernet",
946 	  WM_T_82540,		WMP_F_COPPER },
947 
948 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
949 	  "Intel i82545EM 1000BASE-T Ethernet",
950 	  WM_T_82545,		WMP_F_COPPER },
951 
952 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
953 	  "Intel i82545GM 1000BASE-T Ethernet",
954 	  WM_T_82545_3,		WMP_F_COPPER },
955 
956 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
957 	  "Intel i82545GM 1000BASE-X Ethernet",
958 	  WM_T_82545_3,		WMP_F_FIBER },
959 
960 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
961 	  "Intel i82545GM Gigabit Ethernet (SERDES)",
962 	  WM_T_82545_3,		WMP_F_SERDES },
963 
964 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
965 	  "Intel i82546EB 1000BASE-T Ethernet",
966 	  WM_T_82546,		WMP_F_COPPER },
967 
968 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
969 	  "Intel i82546EB 1000BASE-T Ethernet",
970 	  WM_T_82546,		WMP_F_COPPER },
971 
972 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
973 	  "Intel i82545EM 1000BASE-X Ethernet",
974 	  WM_T_82545,		WMP_F_FIBER },
975 
976 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
977 	  "Intel i82546EB 1000BASE-X Ethernet",
978 	  WM_T_82546,		WMP_F_FIBER },
979 
980 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
981 	  "Intel i82546GB 1000BASE-T Ethernet",
982 	  WM_T_82546_3,		WMP_F_COPPER },
983 
984 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
985 	  "Intel i82546GB 1000BASE-X Ethernet",
986 	  WM_T_82546_3,		WMP_F_FIBER },
987 
988 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
989 	  "Intel i82546GB Gigabit Ethernet (SERDES)",
990 	  WM_T_82546_3,		WMP_F_SERDES },
991 
992 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
993 	  "i82546GB quad-port Gigabit Ethernet",
994 	  WM_T_82546_3,		WMP_F_COPPER },
995 
996 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
997 	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
998 	  WM_T_82546_3,		WMP_F_COPPER },
999 
1000 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
1001 	  "Intel PRO/1000MT (82546GB)",
1002 	  WM_T_82546_3,		WMP_F_COPPER },
1003 
1004 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
1005 	  "Intel i82541EI 1000BASE-T Ethernet",
1006 	  WM_T_82541,		WMP_F_COPPER },
1007 
1008 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
1009 	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
1010 	  WM_T_82541,		WMP_F_COPPER },
1011 
1012 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
1013 	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
1014 	  WM_T_82541,		WMP_F_COPPER },
1015 
1016 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
1017 	  "Intel i82541ER 1000BASE-T Ethernet",
1018 	  WM_T_82541_2,		WMP_F_COPPER },
1019 
1020 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
1021 	  "Intel i82541GI 1000BASE-T Ethernet",
1022 	  WM_T_82541_2,		WMP_F_COPPER },
1023 
1024 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
1025 	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
1026 	  WM_T_82541_2,		WMP_F_COPPER },
1027 
1028 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
1029 	  "Intel i82541PI 1000BASE-T Ethernet",
1030 	  WM_T_82541_2,		WMP_F_COPPER },
1031 
1032 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
1033 	  "Intel i82547EI 1000BASE-T Ethernet",
1034 	  WM_T_82547,		WMP_F_COPPER },
1035 
1036 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
1037 	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
1038 	  WM_T_82547,		WMP_F_COPPER },
1039 
1040 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
1041 	  "Intel i82547GI 1000BASE-T Ethernet",
1042 	  WM_T_82547_2,		WMP_F_COPPER },
1043 
1044 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
1045 	  "Intel PRO/1000 PT (82571EB)",
1046 	  WM_T_82571,		WMP_F_COPPER },
1047 
1048 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
1049 	  "Intel PRO/1000 PF (82571EB)",
1050 	  WM_T_82571,		WMP_F_FIBER },
1051 
1052 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
1053 	  "Intel PRO/1000 PB (82571EB)",
1054 	  WM_T_82571,		WMP_F_SERDES },
1055 
1056 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
1057 	  "Intel PRO/1000 QT (82571EB)",
1058 	  WM_T_82571,		WMP_F_COPPER },
1059 
1060 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
1061 	  "Intel PRO/1000 PT Quad Port Server Adapter",
1062 	  WM_T_82571,		WMP_F_COPPER, },
1063 
1064 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
1065 	  "Intel Gigabit PT Quad Port Server ExpressModule",
1066 	  WM_T_82571,		WMP_F_COPPER, },
1067 
1068 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
1069 	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
1070 	  WM_T_82571,		WMP_F_SERDES, },
1071 
1072 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
1073 	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
1074 	  WM_T_82571,		WMP_F_SERDES, },
1075 
1076 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
1077 	  "Intel 82571EB Quad 1000baseX Ethernet",
1078 	  WM_T_82571,		WMP_F_FIBER, },
1079 
1080 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
1081 	  "Intel i82572EI 1000baseT Ethernet",
1082 	  WM_T_82572,		WMP_F_COPPER },
1083 
1084 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
1085 	  "Intel i82572EI 1000baseX Ethernet",
1086 	  WM_T_82572,		WMP_F_FIBER },
1087 
1088 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
1089 	  "Intel i82572EI Gigabit Ethernet (SERDES)",
1090 	  WM_T_82572,		WMP_F_SERDES },
1091 
1092 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
1093 	  "Intel i82572EI 1000baseT Ethernet",
1094 	  WM_T_82572,		WMP_F_COPPER },
1095 
1096 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
1097 	  "Intel i82573E",
1098 	  WM_T_82573,		WMP_F_COPPER },
1099 
1100 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
1101 	  "Intel i82573E IAMT",
1102 	  WM_T_82573,		WMP_F_COPPER },
1103 
1104 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
1105 	  "Intel i82573L Gigabit Ethernet",
1106 	  WM_T_82573,		WMP_F_COPPER },
1107 
1108 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
1109 	  "Intel i82574L",
1110 	  WM_T_82574,		WMP_F_COPPER },
1111 
1112 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
1113 	  "Intel i82574L",
1114 	  WM_T_82574,		WMP_F_COPPER },
1115 
1116 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
1117 	  "Intel i82583V",
1118 	  WM_T_82583,		WMP_F_COPPER },
1119 
1120 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
1121 	  "i80003 dual 1000baseT Ethernet",
1122 	  WM_T_80003,		WMP_F_COPPER },
1123 
1124 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
1125 	  "i80003 dual 1000baseX Ethernet",
1126 	  WM_T_80003,		WMP_F_COPPER },
1127 
1128 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
1129 	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
1130 	  WM_T_80003,		WMP_F_SERDES },
1131 
1132 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
1133 	  "Intel i80003 1000baseT Ethernet",
1134 	  WM_T_80003,		WMP_F_COPPER },
1135 
1136 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
1137 	  "Intel i80003 Gigabit Ethernet (SERDES)",
1138 	  WM_T_80003,		WMP_F_SERDES },
1139 
1140 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
1141 	  "Intel i82801H (M_AMT) LAN Controller",
1142 	  WM_T_ICH8,		WMP_F_COPPER },
1143 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
1144 	  "Intel i82801H (AMT) LAN Controller",
1145 	  WM_T_ICH8,		WMP_F_COPPER },
1146 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
1147 	  "Intel i82801H LAN Controller",
1148 	  WM_T_ICH8,		WMP_F_COPPER },
1149 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
1150 	  "Intel i82801H (IFE) 10/100 LAN Controller",
1151 	  WM_T_ICH8,		WMP_F_COPPER },
1152 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
1153 	  "Intel i82801H (M) LAN Controller",
1154 	  WM_T_ICH8,		WMP_F_COPPER },
1155 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
1156 	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
1157 	  WM_T_ICH8,		WMP_F_COPPER },
1158 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
1159 	  "Intel i82801H IFE (G) 10/100 LAN Controller",
1160 	  WM_T_ICH8,		WMP_F_COPPER },
1161 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
1162 	  "82567V-3 LAN Controller",
1163 	  WM_T_ICH8,		WMP_F_COPPER },
1164 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
1165 	  "82801I (AMT) LAN Controller",
1166 	  WM_T_ICH9,		WMP_F_COPPER },
1167 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
1168 	  "82801I 10/100 LAN Controller",
1169 	  WM_T_ICH9,		WMP_F_COPPER },
1170 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
1171 	  "82801I (G) 10/100 LAN Controller",
1172 	  WM_T_ICH9,		WMP_F_COPPER },
1173 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
1174 	  "82801I (GT) 10/100 LAN Controller",
1175 	  WM_T_ICH9,		WMP_F_COPPER },
1176 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
1177 	  "82801I (C) LAN Controller",
1178 	  WM_T_ICH9,		WMP_F_COPPER },
1179 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
1180 	  "82801I mobile LAN Controller",
1181 	  WM_T_ICH9,		WMP_F_COPPER },
1182 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
1183 	  "82801I mobile (V) LAN Controller",
1184 	  WM_T_ICH9,		WMP_F_COPPER },
1185 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
1186 	  "82801I mobile (AMT) LAN Controller",
1187 	  WM_T_ICH9,		WMP_F_COPPER },
1188 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
1189 	  "82567LM-4 LAN Controller",
1190 	  WM_T_ICH9,		WMP_F_COPPER },
1191 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
1192 	  "82567LM-2 LAN Controller",
1193 	  WM_T_ICH10,		WMP_F_COPPER },
1194 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
1195 	  "82567LF-2 LAN Controller",
1196 	  WM_T_ICH10,		WMP_F_COPPER },
1197 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
1198 	  "82567LM-3 LAN Controller",
1199 	  WM_T_ICH10,		WMP_F_COPPER },
1200 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
1201 	  "82567LF-3 LAN Controller",
1202 	  WM_T_ICH10,		WMP_F_COPPER },
1203 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
1204 	  "82567V-2 LAN Controller",
1205 	  WM_T_ICH10,		WMP_F_COPPER },
1206 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
1207 	  "82567V-3? LAN Controller",
1208 	  WM_T_ICH10,		WMP_F_COPPER },
1209 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
1210 	  "HANKSVILLE LAN Controller",
1211 	  WM_T_ICH10,		WMP_F_COPPER },
1212 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
1213 	  "PCH LAN (82577LM) Controller",
1214 	  WM_T_PCH,		WMP_F_COPPER },
1215 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
1216 	  "PCH LAN (82577LC) Controller",
1217 	  WM_T_PCH,		WMP_F_COPPER },
1218 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
1219 	  "PCH LAN (82578DM) Controller",
1220 	  WM_T_PCH,		WMP_F_COPPER },
1221 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
1222 	  "PCH LAN (82578DC) Controller",
1223 	  WM_T_PCH,		WMP_F_COPPER },
1224 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
1225 	  "PCH2 LAN (82579LM) Controller",
1226 	  WM_T_PCH2,		WMP_F_COPPER },
1227 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
1228 	  "PCH2 LAN (82579V) Controller",
1229 	  WM_T_PCH2,		WMP_F_COPPER },
1230 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
1231 	  "82575EB dual-1000baseT Ethernet",
1232 	  WM_T_82575,		WMP_F_COPPER },
1233 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
1234 	  "82575EB dual-1000baseX Ethernet (SERDES)",
1235 	  WM_T_82575,		WMP_F_SERDES },
1236 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
1237 	  "82575GB quad-1000baseT Ethernet",
1238 	  WM_T_82575,		WMP_F_COPPER },
1239 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
1240 	  "82575GB quad-1000baseT Ethernet (PM)",
1241 	  WM_T_82575,		WMP_F_COPPER },
1242 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
1243 	  "82576 1000BaseT Ethernet",
1244 	  WM_T_82576,		WMP_F_COPPER },
1245 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
1246 	  "82576 1000BaseX Ethernet",
1247 	  WM_T_82576,		WMP_F_FIBER },
1248 
1249 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
1250 	  "82576 gigabit Ethernet (SERDES)",
1251 	  WM_T_82576,		WMP_F_SERDES },
1252 
1253 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
1254 	  "82576 quad-1000BaseT Ethernet",
1255 	  WM_T_82576,		WMP_F_COPPER },
1256 
1257 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
1258 	  "82576 Gigabit ET2 Quad Port Server Adapter",
1259 	  WM_T_82576,		WMP_F_COPPER },
1260 
1261 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
1262 	  "82576 gigabit Ethernet",
1263 	  WM_T_82576,		WMP_F_COPPER },
1264 
1265 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
1266 	  "82576 gigabit Ethernet (SERDES)",
1267 	  WM_T_82576,		WMP_F_SERDES },
1268 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
1269 	  "82576 quad-gigabit Ethernet (SERDES)",
1270 	  WM_T_82576,		WMP_F_SERDES },
1271 
1272 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
1273 	  "82580 1000BaseT Ethernet",
1274 	  WM_T_82580,		WMP_F_COPPER },
1275 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
1276 	  "82580 1000BaseX Ethernet",
1277 	  WM_T_82580,		WMP_F_FIBER },
1278 
1279 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
1280 	  "82580 1000BaseT Ethernet (SERDES)",
1281 	  WM_T_82580,		WMP_F_SERDES },
1282 
1283 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
1284 	  "82580 gigabit Ethernet (SGMII)",
1285 	  WM_T_82580,		WMP_F_COPPER },
1286 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
1287 	  "82580 dual-1000BaseT Ethernet",
1288 	  WM_T_82580,		WMP_F_COPPER },
1289 
1290 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
1291 	  "82580 quad-1000BaseX Ethernet",
1292 	  WM_T_82580,		WMP_F_FIBER },
1293 
1294 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
1295 	  "DH89XXCC Gigabit Ethernet (SGMII)",
1296 	  WM_T_82580,		WMP_F_COPPER },
1297 
1298 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
1299 	  "DH89XXCC Gigabit Ethernet (SERDES)",
1300 	  WM_T_82580,		WMP_F_SERDES },
1301 
1302 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
1303 	  "DH89XXCC 1000BASE-KX Ethernet",
1304 	  WM_T_82580,		WMP_F_SERDES },
1305 
1306 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
1307 	  "DH89XXCC Gigabit Ethernet (SFP)",
1308 	  WM_T_82580,		WMP_F_SERDES },
1309 
1310 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
1311 	  "I350 Gigabit Network Connection",
1312 	  WM_T_I350,		WMP_F_COPPER },
1313 
1314 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
1315 	  "I350 Gigabit Fiber Network Connection",
1316 	  WM_T_I350,		WMP_F_FIBER },
1317 
1318 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
1319 	  "I350 Gigabit Backplane Connection",
1320 	  WM_T_I350,		WMP_F_SERDES },
1321 
1322 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
1323 	  "I350 Quad Port Gigabit Ethernet",
1324 	  WM_T_I350,		WMP_F_SERDES },
1325 
1326 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
1327 	  "I350 Gigabit Connection",
1328 	  WM_T_I350,		WMP_F_COPPER },
1329 
1330 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
1331 	  "I354 Gigabit Ethernet (KX)",
1332 	  WM_T_I354,		WMP_F_SERDES },
1333 
1334 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
1335 	  "I354 Gigabit Ethernet (SGMII)",
1336 	  WM_T_I354,		WMP_F_COPPER },
1337 
1338 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
1339 	  "I354 Gigabit Ethernet (2.5G)",
1340 	  WM_T_I354,		WMP_F_COPPER },
1341 
1342 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
1343 	  "I210-T1 Ethernet Server Adapter",
1344 	  WM_T_I210,		WMP_F_COPPER },
1345 
1346 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
1347 	  "I210 Ethernet (Copper OEM)",
1348 	  WM_T_I210,		WMP_F_COPPER },
1349 
1350 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
1351 	  "I210 Ethernet (Copper IT)",
1352 	  WM_T_I210,		WMP_F_COPPER },
1353 
1354 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
1355 	  "I210 Ethernet (FLASH less)",
1356 	  WM_T_I210,		WMP_F_COPPER },
1357 
1358 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
1359 	  "I210 Gigabit Ethernet (Fiber)",
1360 	  WM_T_I210,		WMP_F_FIBER },
1361 
1362 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
1363 	  "I210 Gigabit Ethernet (SERDES)",
1364 	  WM_T_I210,		WMP_F_SERDES },
1365 
1366 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
1367 	  "I210 Gigabit Ethernet (FLASH less)",
1368 	  WM_T_I210,		WMP_F_SERDES },
1369 
1370 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
1371 	  "I210 Gigabit Ethernet (SGMII)",
1372 	  WM_T_I210,		WMP_F_COPPER },
1373 
1374 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
1375 	  "I211 Ethernet (COPPER)",
1376 	  WM_T_I211,		WMP_F_COPPER },
1377 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
1378 	  "I217 V Ethernet Connection",
1379 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1380 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
1381 	  "I217 LM Ethernet Connection",
1382 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1383 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1384 	  "I218 V Ethernet Connection",
1385 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1386 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1387 	  "I218 V Ethernet Connection",
1388 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1389 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1390 	  "I218 V Ethernet Connection",
1391 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1392 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1393 	  "I218 LM Ethernet Connection",
1394 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1395 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1396 	  "I218 LM Ethernet Connection",
1397 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1398 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1399 	  "I218 LM Ethernet Connection",
1400 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1401 #if 0
1402 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1403 	  "I219 V Ethernet Connection",
1404 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1405 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1406 	  "I219 V Ethernet Connection",
1407 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1408 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1409 	  "I219 V Ethernet Connection",
1410 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1411 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1412 	  "I219 V Ethernet Connection",
1413 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1414 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1415 	  "I219 LM Ethernet Connection",
1416 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1417 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1418 	  "I219 LM Ethernet Connection",
1419 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1420 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1421 	  "I219 LM Ethernet Connection",
1422 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1423 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1424 	  "I219 LM Ethernet Connection",
1425 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1426 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1427 	  "I219 LM Ethernet Connection",
1428 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1429 #endif
1430 	{ 0,			0,
1431 	  NULL,
1432 	  0,			0 },
1433 };
1434 
1435 /*
1436  * Register read/write functions.
1437  * Other than CSR_{READ|WRITE}().
1438  */
1439 
1440 #if 0 /* Not currently used */
1441 static inline uint32_t
1442 wm_io_read(struct wm_softc *sc, int reg)
1443 {
1444 
1445 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1446 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1447 }
1448 #endif
1449 
1450 static inline void
1451 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1452 {
1453 
1454 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1455 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1456 }
1457 
1458 static inline void
1459 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1460     uint32_t data)
1461 {
1462 	uint32_t regval;
1463 	int i;
1464 
1465 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1466 
1467 	CSR_WRITE(sc, reg, regval);
1468 
1469 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1470 		delay(5);
1471 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1472 			break;
1473 	}
1474 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1475 		aprint_error("%s: WARNING:"
1476 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1477 		    device_xname(sc->sc_dev), reg);
1478 	}
1479 }
1480 
1481 static inline void
1482 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1483 {
1484 	wa->wa_low = htole32(v & 0xffffffffU);
1485 	if (sizeof(bus_addr_t) == 8)
1486 		wa->wa_high = htole32((uint64_t) v >> 32);
1487 	else
1488 		wa->wa_high = 0;
1489 }
1490 
1491 /*
1492  * Descriptor sync/init functions.
1493  */
1494 static inline void
1495 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1496 {
1497 	struct wm_softc *sc = txq->txq_sc;
1498 
1499 	/* If it will wrap around, sync to the end of the ring. */
1500 	if ((start + num) > WM_NTXDESC(txq)) {
1501 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1502 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1503 		    (WM_NTXDESC(txq) - start), ops);
1504 		num -= (WM_NTXDESC(txq) - start);
1505 		start = 0;
1506 	}
1507 
1508 	/* Now sync whatever is left. */
1509 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1510 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1511 }
1512 
1513 static inline void
1514 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1515 {
1516 	struct wm_softc *sc = rxq->rxq_sc;
1517 
1518 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1519 	    WM_CDRXOFF(start), sizeof(wiseman_rxdesc_t), ops);
1520 }
1521 
1522 static inline void
1523 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1524 {
1525 	struct wm_softc *sc = rxq->rxq_sc;
1526 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1527 	wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1528 	struct mbuf *m = rxs->rxs_mbuf;
1529 
1530 	/*
1531 	 * Note: We scoot the packet forward 2 bytes in the buffer
1532 	 * so that the payload after the Ethernet header is aligned
1533 	 * to a 4-byte boundary.
1534 
1535 	 * XXX BRAINDAMAGE ALERT!
1536 	 * The stupid chip uses the same size for every buffer, which
1537 	 * is set in the Receive Control register.  We are using the 2K
1538 	 * size option, but what we REALLY want is (2K - 2)!  For this
1539 	 * reason, we can't "scoot" packets longer than the standard
1540 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1541 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1542 	 * the upper layer copy the headers.
1543 	 */
1544 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
1545 
1546 	wm_set_dma_addr(&rxd->wrx_addr,
1547 	    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1548 	rxd->wrx_len = 0;
1549 	rxd->wrx_cksum = 0;
1550 	rxd->wrx_status = 0;
1551 	rxd->wrx_errors = 0;
1552 	rxd->wrx_special = 0;
1553 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1554 
1555 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1556 }
1557 
1558 /*
1559  * Device driver interface functions and commonly used functions.
1560  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1561  */
1562 
1563 /* Lookup supported device table */
1564 static const struct wm_product *
1565 wm_lookup(const struct pci_attach_args *pa)
1566 {
1567 	const struct wm_product *wmp;
1568 
1569 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1570 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1571 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1572 			return wmp;
1573 	}
1574 	return NULL;
1575 }
1576 
1577 /* The match function (ca_match) */
1578 static int
1579 wm_match(device_t parent, cfdata_t cf, void *aux)
1580 {
1581 	struct pci_attach_args *pa = aux;
1582 
1583 	if (wm_lookup(pa) != NULL)
1584 		return 1;
1585 
1586 	return 0;
1587 }
1588 
1589 /* The attach function (ca_attach) */
1590 static void
1591 wm_attach(device_t parent, device_t self, void *aux)
1592 {
1593 	struct wm_softc *sc = device_private(self);
1594 	struct pci_attach_args *pa = aux;
1595 	prop_dictionary_t dict;
1596 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1597 	pci_chipset_tag_t pc = pa->pa_pc;
1598 	int counts[PCI_INTR_TYPE_SIZE];
1599 	pci_intr_type_t max_type;
1600 	const char *eetype, *xname;
1601 	bus_space_tag_t memt;
1602 	bus_space_handle_t memh;
1603 	bus_size_t memsize;
1604 	int memh_valid;
1605 	int i, error;
1606 	const struct wm_product *wmp;
1607 	prop_data_t ea;
1608 	prop_number_t pn;
1609 	uint8_t enaddr[ETHER_ADDR_LEN];
1610 	uint16_t cfg1, cfg2, swdpin, nvmword;
1611 	pcireg_t preg, memtype;
1612 	uint16_t eeprom_data, apme_mask;
1613 	bool force_clear_smbi;
1614 	uint32_t link_mode;
1615 	uint32_t reg;
1616 	void (*deferred_start_func)(struct ifnet *) = NULL;
1617 
1618 	sc->sc_dev = self;
1619 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1620 	sc->sc_core_stopping = false;
1621 
1622 	wmp = wm_lookup(pa);
1623 #ifdef DIAGNOSTIC
1624 	if (wmp == NULL) {
1625 		printf("\n");
1626 		panic("wm_attach: impossible");
1627 	}
1628 #endif
1629 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1630 
1631 	sc->sc_pc = pa->pa_pc;
1632 	sc->sc_pcitag = pa->pa_tag;
1633 
1634 	if (pci_dma64_available(pa))
1635 		sc->sc_dmat = pa->pa_dmat64;
1636 	else
1637 		sc->sc_dmat = pa->pa_dmat;
1638 
1639 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1640 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1641 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1642 
1643 	sc->sc_type = wmp->wmp_type;
1644 
1645 	/* Set default function pointers */
1646 	sc->phy.acquire = wm_get_null;
1647 	sc->phy.release = wm_put_null;
1648 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1649 
1650 	if (sc->sc_type < WM_T_82543) {
1651 		if (sc->sc_rev < 2) {
1652 			aprint_error_dev(sc->sc_dev,
1653 			    "i82542 must be at least rev. 2\n");
1654 			return;
1655 		}
1656 		if (sc->sc_rev < 3)
1657 			sc->sc_type = WM_T_82542_2_0;
1658 	}
1659 
1660 	/*
1661 	 * Disable MSI for Errata:
1662 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1663 	 *
1664 	 *  82544: Errata 25
1665 	 *  82540: Errata  6 (easy to reproduce device timeout)
1666 	 *  82545: Errata  4 (easy to reproduce device timeout)
1667 	 *  82546: Errata 26 (easy to reproduce device timeout)
1668 	 *  82541: Errata  7 (easy to reproduce device timeout)
1669 	 *
1670 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1671 	 *
1672 	 *  82571 & 82572: Errata 63
1673 	 */
1674 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1675 	    || (sc->sc_type == WM_T_82572))
1676 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1677 
1678 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1679 	    || (sc->sc_type == WM_T_82580)
1680 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1681 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1682 		sc->sc_flags |= WM_F_NEWQUEUE;
1683 
1684 	/* Set device properties (mactype) */
1685 	dict = device_properties(sc->sc_dev);
1686 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1687 
1688 	/*
1689 	 * Map the device.  All devices support memory-mapped acccess,
1690 	 * and it is really required for normal operation.
1691 	 */
1692 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1693 	switch (memtype) {
1694 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1695 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1696 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1697 		    memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1698 		break;
1699 	default:
1700 		memh_valid = 0;
1701 		break;
1702 	}
1703 
1704 	if (memh_valid) {
1705 		sc->sc_st = memt;
1706 		sc->sc_sh = memh;
1707 		sc->sc_ss = memsize;
1708 	} else {
1709 		aprint_error_dev(sc->sc_dev,
1710 		    "unable to map device registers\n");
1711 		return;
1712 	}
1713 
1714 	/*
1715 	 * In addition, i82544 and later support I/O mapped indirect
1716 	 * register access.  It is not desirable (nor supported in
1717 	 * this driver) to use it for normal operation, though it is
1718 	 * required to work around bugs in some chip versions.
1719 	 */
1720 	if (sc->sc_type >= WM_T_82544) {
1721 		/* First we have to find the I/O BAR. */
1722 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1723 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1724 			if (memtype == PCI_MAPREG_TYPE_IO)
1725 				break;
1726 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1727 			    PCI_MAPREG_MEM_TYPE_64BIT)
1728 				i += 4;	/* skip high bits, too */
1729 		}
1730 		if (i < PCI_MAPREG_END) {
1731 			/*
1732 			 * We found PCI_MAPREG_TYPE_IO. Note that 82580
1733 			 * (and newer?) chip has no PCI_MAPREG_TYPE_IO.
1734 			 * It's no problem because newer chips has no this
1735 			 * bug.
1736 			 *
1737 			 * The i8254x doesn't apparently respond when the
1738 			 * I/O BAR is 0, which looks somewhat like it's not
1739 			 * been configured.
1740 			 */
1741 			preg = pci_conf_read(pc, pa->pa_tag, i);
1742 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1743 				aprint_error_dev(sc->sc_dev,
1744 				    "WARNING: I/O BAR at zero.\n");
1745 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1746 					0, &sc->sc_iot, &sc->sc_ioh,
1747 					NULL, &sc->sc_ios) == 0) {
1748 				sc->sc_flags |= WM_F_IOH_VALID;
1749 			} else {
1750 				aprint_error_dev(sc->sc_dev,
1751 				    "WARNING: unable to map I/O space\n");
1752 			}
1753 		}
1754 
1755 	}
1756 
1757 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1758 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1759 	preg |= PCI_COMMAND_MASTER_ENABLE;
1760 	if (sc->sc_type < WM_T_82542_2_1)
1761 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1762 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1763 
1764 	/* power up chip */
1765 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self,
1766 	    NULL)) && error != EOPNOTSUPP) {
1767 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1768 		return;
1769 	}
1770 
1771 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1772 
1773 	/* Allocation settings */
1774 	max_type = PCI_INTR_TYPE_MSIX;
1775 	counts[PCI_INTR_TYPE_MSIX] = sc->sc_nqueues + 1;
1776 	counts[PCI_INTR_TYPE_MSI] = 1;
1777 	counts[PCI_INTR_TYPE_INTX] = 1;
1778 
1779 alloc_retry:
1780 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1781 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1782 		return;
1783 	}
1784 
1785 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1786 		error = wm_setup_msix(sc);
1787 		if (error) {
1788 			pci_intr_release(pc, sc->sc_intrs,
1789 			    counts[PCI_INTR_TYPE_MSIX]);
1790 
1791 			/* Setup for MSI: Disable MSI-X */
1792 			max_type = PCI_INTR_TYPE_MSI;
1793 			counts[PCI_INTR_TYPE_MSI] = 1;
1794 			counts[PCI_INTR_TYPE_INTX] = 1;
1795 			goto alloc_retry;
1796 		}
1797 	} else 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1798 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1799 		error = wm_setup_legacy(sc);
1800 		if (error) {
1801 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1802 			    counts[PCI_INTR_TYPE_MSI]);
1803 
1804 			/* The next try is for INTx: Disable MSI */
1805 			max_type = PCI_INTR_TYPE_INTX;
1806 			counts[PCI_INTR_TYPE_INTX] = 1;
1807 			goto alloc_retry;
1808 		}
1809 	} else {
1810 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1811 		error = wm_setup_legacy(sc);
1812 		if (error) {
1813 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1814 			    counts[PCI_INTR_TYPE_INTX]);
1815 			return;
1816 		}
1817 	}
1818 
1819 	/*
1820 	 * Check the function ID (unit number of the chip).
1821 	 */
1822 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1823 	    || (sc->sc_type ==  WM_T_82571) || (sc->sc_type == WM_T_80003)
1824 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1825 	    || (sc->sc_type == WM_T_82580)
1826 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1827 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1828 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1829 	else
1830 		sc->sc_funcid = 0;
1831 
1832 	/*
1833 	 * Determine a few things about the bus we're connected to.
1834 	 */
1835 	if (sc->sc_type < WM_T_82543) {
1836 		/* We don't really know the bus characteristics here. */
1837 		sc->sc_bus_speed = 33;
1838 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1839 		/*
1840 		 * CSA (Communication Streaming Architecture) is about as fast
1841 		 * a 32-bit 66MHz PCI Bus.
1842 		 */
1843 		sc->sc_flags |= WM_F_CSA;
1844 		sc->sc_bus_speed = 66;
1845 		aprint_verbose_dev(sc->sc_dev,
1846 		    "Communication Streaming Architecture\n");
1847 		if (sc->sc_type == WM_T_82547) {
1848 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1849 			callout_setfunc(&sc->sc_txfifo_ch,
1850 					wm_82547_txfifo_stall, sc);
1851 			aprint_verbose_dev(sc->sc_dev,
1852 			    "using 82547 Tx FIFO stall work-around\n");
1853 		}
1854 	} else if (sc->sc_type >= WM_T_82571) {
1855 		sc->sc_flags |= WM_F_PCIE;
1856 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1857 		    && (sc->sc_type != WM_T_ICH10)
1858 		    && (sc->sc_type != WM_T_PCH)
1859 		    && (sc->sc_type != WM_T_PCH2)
1860 		    && (sc->sc_type != WM_T_PCH_LPT)
1861 		    && (sc->sc_type != WM_T_PCH_SPT)) {
1862 			/* ICH* and PCH* have no PCIe capability registers */
1863 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1864 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
1865 				NULL) == 0)
1866 				aprint_error_dev(sc->sc_dev,
1867 				    "unable to find PCIe capability\n");
1868 		}
1869 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
1870 	} else {
1871 		reg = CSR_READ(sc, WMREG_STATUS);
1872 		if (reg & STATUS_BUS64)
1873 			sc->sc_flags |= WM_F_BUS64;
1874 		if ((reg & STATUS_PCIX_MODE) != 0) {
1875 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
1876 
1877 			sc->sc_flags |= WM_F_PCIX;
1878 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
1879 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
1880 				aprint_error_dev(sc->sc_dev,
1881 				    "unable to find PCIX capability\n");
1882 			else if (sc->sc_type != WM_T_82545_3 &&
1883 				 sc->sc_type != WM_T_82546_3) {
1884 				/*
1885 				 * Work around a problem caused by the BIOS
1886 				 * setting the max memory read byte count
1887 				 * incorrectly.
1888 				 */
1889 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
1890 				    sc->sc_pcixe_capoff + PCIX_CMD);
1891 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
1892 				    sc->sc_pcixe_capoff + PCIX_STATUS);
1893 
1894 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
1895 				    PCIX_CMD_BYTECNT_SHIFT;
1896 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
1897 				    PCIX_STATUS_MAXB_SHIFT;
1898 				if (bytecnt > maxb) {
1899 					aprint_verbose_dev(sc->sc_dev,
1900 					    "resetting PCI-X MMRBC: %d -> %d\n",
1901 					    512 << bytecnt, 512 << maxb);
1902 					pcix_cmd = (pcix_cmd &
1903 					    ~PCIX_CMD_BYTECNT_MASK) |
1904 					   (maxb << PCIX_CMD_BYTECNT_SHIFT);
1905 					pci_conf_write(pa->pa_pc, pa->pa_tag,
1906 					    sc->sc_pcixe_capoff + PCIX_CMD,
1907 					    pcix_cmd);
1908 				}
1909 			}
1910 		}
1911 		/*
1912 		 * The quad port adapter is special; it has a PCIX-PCIX
1913 		 * bridge on the board, and can run the secondary bus at
1914 		 * a higher speed.
1915 		 */
1916 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
1917 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
1918 								      : 66;
1919 		} else if (sc->sc_flags & WM_F_PCIX) {
1920 			switch (reg & STATUS_PCIXSPD_MASK) {
1921 			case STATUS_PCIXSPD_50_66:
1922 				sc->sc_bus_speed = 66;
1923 				break;
1924 			case STATUS_PCIXSPD_66_100:
1925 				sc->sc_bus_speed = 100;
1926 				break;
1927 			case STATUS_PCIXSPD_100_133:
1928 				sc->sc_bus_speed = 133;
1929 				break;
1930 			default:
1931 				aprint_error_dev(sc->sc_dev,
1932 				    "unknown PCIXSPD %d; assuming 66MHz\n",
1933 				    reg & STATUS_PCIXSPD_MASK);
1934 				sc->sc_bus_speed = 66;
1935 				break;
1936 			}
1937 		} else
1938 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
1939 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
1940 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
1941 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
1942 	}
1943 
1944 	/* clear interesting stat counters */
1945 	CSR_READ(sc, WMREG_COLC);
1946 	CSR_READ(sc, WMREG_RXERRC);
1947 
1948 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
1949 	    || (sc->sc_type >= WM_T_ICH8))
1950 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1951 	if (sc->sc_type >= WM_T_ICH8)
1952 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
1953 
1954 	/* Set PHY, NVM mutex related stuff */
1955 	switch (sc->sc_type) {
1956 	case WM_T_82542_2_0:
1957 	case WM_T_82542_2_1:
1958 	case WM_T_82543:
1959 	case WM_T_82544:
1960 		/* Microwire */
1961 		sc->sc_nvm_wordsize = 64;
1962 		sc->sc_nvm_addrbits = 6;
1963 		break;
1964 	case WM_T_82540:
1965 	case WM_T_82545:
1966 	case WM_T_82545_3:
1967 	case WM_T_82546:
1968 	case WM_T_82546_3:
1969 		/* Microwire */
1970 		reg = CSR_READ(sc, WMREG_EECD);
1971 		if (reg & EECD_EE_SIZE) {
1972 			sc->sc_nvm_wordsize = 256;
1973 			sc->sc_nvm_addrbits = 8;
1974 		} else {
1975 			sc->sc_nvm_wordsize = 64;
1976 			sc->sc_nvm_addrbits = 6;
1977 		}
1978 		sc->sc_flags |= WM_F_LOCK_EECD;
1979 		break;
1980 	case WM_T_82541:
1981 	case WM_T_82541_2:
1982 	case WM_T_82547:
1983 	case WM_T_82547_2:
1984 		sc->sc_flags |= WM_F_LOCK_EECD;
1985 		reg = CSR_READ(sc, WMREG_EECD);
1986 		if (reg & EECD_EE_TYPE) {
1987 			/* SPI */
1988 			sc->sc_flags |= WM_F_EEPROM_SPI;
1989 			wm_nvm_set_addrbits_size_eecd(sc);
1990 		} else {
1991 			/* Microwire */
1992 			if ((reg & EECD_EE_ABITS) != 0) {
1993 				sc->sc_nvm_wordsize = 256;
1994 				sc->sc_nvm_addrbits = 8;
1995 			} else {
1996 				sc->sc_nvm_wordsize = 64;
1997 				sc->sc_nvm_addrbits = 6;
1998 			}
1999 		}
2000 		break;
2001 	case WM_T_82571:
2002 	case WM_T_82572:
2003 		/* SPI */
2004 		sc->sc_flags |= WM_F_EEPROM_SPI;
2005 		wm_nvm_set_addrbits_size_eecd(sc);
2006 		sc->sc_flags |= WM_F_LOCK_EECD | WM_F_LOCK_SWSM;
2007 		sc->phy.acquire = wm_get_swsm_semaphore;
2008 		sc->phy.release = wm_put_swsm_semaphore;
2009 		break;
2010 	case WM_T_82573:
2011 	case WM_T_82574:
2012 	case WM_T_82583:
2013 		if (sc->sc_type == WM_T_82573) {
2014 			sc->sc_flags |= WM_F_LOCK_SWSM;
2015 			sc->phy.acquire = wm_get_swsm_semaphore;
2016 			sc->phy.release = wm_put_swsm_semaphore;
2017 		} else {
2018 			sc->sc_flags |= WM_F_LOCK_EXTCNF;
2019 			/* Both PHY and NVM use the same semaphore. */
2020 			sc->phy.acquire
2021 			    = wm_get_swfwhw_semaphore;
2022 			sc->phy.release
2023 			    = wm_put_swfwhw_semaphore;
2024 		}
2025 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2026 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2027 			sc->sc_nvm_wordsize = 2048;
2028 		} else {
2029 			/* SPI */
2030 			sc->sc_flags |= WM_F_EEPROM_SPI;
2031 			wm_nvm_set_addrbits_size_eecd(sc);
2032 		}
2033 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2034 		break;
2035 	case WM_T_82575:
2036 	case WM_T_82576:
2037 	case WM_T_82580:
2038 	case WM_T_I350:
2039 	case WM_T_I354:
2040 	case WM_T_80003:
2041 		/* SPI */
2042 		sc->sc_flags |= WM_F_EEPROM_SPI;
2043 		wm_nvm_set_addrbits_size_eecd(sc);
2044 		sc->sc_flags |= WM_F_EEPROM_EERDEEWR | WM_F_LOCK_SWFW
2045 		    | WM_F_LOCK_SWSM;
2046 		sc->phy.acquire = wm_get_phy_82575;
2047 		sc->phy.release = wm_put_phy_82575;
2048 		break;
2049 	case WM_T_ICH8:
2050 	case WM_T_ICH9:
2051 	case WM_T_ICH10:
2052 	case WM_T_PCH:
2053 	case WM_T_PCH2:
2054 	case WM_T_PCH_LPT:
2055 		/* FLASH */
2056 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2057 		sc->sc_nvm_wordsize = 2048;
2058 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2059 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2060 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2061 			aprint_error_dev(sc->sc_dev,
2062 			    "can't map FLASH registers\n");
2063 			goto out;
2064 		}
2065 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2066 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2067 		    ICH_FLASH_SECTOR_SIZE;
2068 		sc->sc_ich8_flash_bank_size =
2069 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2070 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2071 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2072 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
2073 		sc->sc_flashreg_offset = 0;
2074 		sc->phy.acquire = wm_get_swflag_ich8lan;
2075 		sc->phy.release = wm_put_swflag_ich8lan;
2076 		break;
2077 	case WM_T_PCH_SPT:
2078 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2079 		sc->sc_flags |= WM_F_EEPROM_FLASH | WM_F_LOCK_EXTCNF;
2080 		sc->sc_flasht = sc->sc_st;
2081 		sc->sc_flashh = sc->sc_sh;
2082 		sc->sc_ich8_flash_base = 0;
2083 		sc->sc_nvm_wordsize =
2084 			(((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2085 			* NVM_SIZE_MULTIPLIER;
2086 		/* It is size in bytes, we want words */
2087 		sc->sc_nvm_wordsize /= 2;
2088 		/* assume 2 banks */
2089 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2090 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2091 		sc->phy.acquire = wm_get_swflag_ich8lan;
2092 		sc->phy.release = wm_put_swflag_ich8lan;
2093 		break;
2094 	case WM_T_I210:
2095 	case WM_T_I211:
2096 		if (wm_nvm_get_flash_presence_i210(sc)) {
2097 			wm_nvm_set_addrbits_size_eecd(sc);
2098 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2099 			sc->sc_flags |= WM_F_EEPROM_EERDEEWR;
2100 		} else {
2101 			sc->sc_nvm_wordsize = INVM_SIZE;
2102 			sc->sc_flags |= WM_F_EEPROM_INVM;
2103 		}
2104 		sc->sc_flags |= WM_F_LOCK_SWFW | WM_F_LOCK_SWSM;
2105 		sc->phy.acquire = wm_get_phy_82575;
2106 		sc->phy.release = wm_put_phy_82575;
2107 		break;
2108 	default:
2109 		break;
2110 	}
2111 
2112 	/* Reset the chip to a known state. */
2113 	wm_reset(sc);
2114 
2115 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2116 	switch (sc->sc_type) {
2117 	case WM_T_82571:
2118 	case WM_T_82572:
2119 		reg = CSR_READ(sc, WMREG_SWSM2);
2120 		if ((reg & SWSM2_LOCK) == 0) {
2121 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2122 			force_clear_smbi = true;
2123 		} else
2124 			force_clear_smbi = false;
2125 		break;
2126 	case WM_T_82573:
2127 	case WM_T_82574:
2128 	case WM_T_82583:
2129 		force_clear_smbi = true;
2130 		break;
2131 	default:
2132 		force_clear_smbi = false;
2133 		break;
2134 	}
2135 	if (force_clear_smbi) {
2136 		reg = CSR_READ(sc, WMREG_SWSM);
2137 		if ((reg & SWSM_SMBI) != 0)
2138 			aprint_error_dev(sc->sc_dev,
2139 			    "Please update the Bootagent\n");
2140 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2141 	}
2142 
2143 	/*
2144 	 * Defer printing the EEPROM type until after verifying the checksum
2145 	 * This allows the EEPROM type to be printed correctly in the case
2146 	 * that no EEPROM is attached.
2147 	 */
2148 	/*
2149 	 * Validate the EEPROM checksum. If the checksum fails, flag
2150 	 * this for later, so we can fail future reads from the EEPROM.
2151 	 */
2152 	if (wm_nvm_validate_checksum(sc)) {
2153 		/*
2154 		 * Read twice again because some PCI-e parts fail the
2155 		 * first check due to the link being in sleep state.
2156 		 */
2157 		if (wm_nvm_validate_checksum(sc))
2158 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2159 	}
2160 
2161 	/* Set device properties (macflags) */
2162 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2163 
2164 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2165 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2166 	else {
2167 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2168 		    sc->sc_nvm_wordsize);
2169 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2170 			aprint_verbose("iNVM");
2171 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2172 			aprint_verbose("FLASH(HW)");
2173 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2174 			aprint_verbose("FLASH");
2175 		else {
2176 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2177 				eetype = "SPI";
2178 			else
2179 				eetype = "MicroWire";
2180 			aprint_verbose("(%d address bits) %s EEPROM",
2181 			    sc->sc_nvm_addrbits, eetype);
2182 		}
2183 	}
2184 	wm_nvm_version(sc);
2185 	aprint_verbose("\n");
2186 
2187 	/* Check for I21[01] PLL workaround */
2188 	if (sc->sc_type == WM_T_I210)
2189 		sc->sc_flags |= WM_F_PLL_WA_I210;
2190 	if ((sc->sc_type == WM_T_I210) && wm_nvm_get_flash_presence_i210(sc)) {
2191 		/* NVM image release 3.25 has a workaround */
2192 		if ((sc->sc_nvm_ver_major < 3)
2193 		    || ((sc->sc_nvm_ver_major == 3)
2194 			&& (sc->sc_nvm_ver_minor < 25))) {
2195 			aprint_verbose_dev(sc->sc_dev,
2196 			    "ROM image version %d.%d is older than 3.25\n",
2197 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2198 			sc->sc_flags |= WM_F_PLL_WA_I210;
2199 		}
2200 	}
2201 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2202 		wm_pll_workaround_i210(sc);
2203 
2204 	wm_get_wakeup(sc);
2205 
2206 	/* Non-AMT based hardware can now take control from firmware */
2207 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2208 		wm_get_hw_control(sc);
2209 
2210 	/*
2211 	 * Read the Ethernet address from the EEPROM, if not first found
2212 	 * in device properties.
2213 	 */
2214 	ea = prop_dictionary_get(dict, "mac-address");
2215 	if (ea != NULL) {
2216 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2217 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2218 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2219 	} else {
2220 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2221 			aprint_error_dev(sc->sc_dev,
2222 			    "unable to read Ethernet address\n");
2223 			goto out;
2224 		}
2225 	}
2226 
2227 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2228 	    ether_sprintf(enaddr));
2229 
2230 	/*
2231 	 * Read the config info from the EEPROM, and set up various
2232 	 * bits in the control registers based on their contents.
2233 	 */
2234 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2235 	if (pn != NULL) {
2236 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2237 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2238 	} else {
2239 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2240 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2241 			goto out;
2242 		}
2243 	}
2244 
2245 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2246 	if (pn != NULL) {
2247 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2248 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2249 	} else {
2250 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2251 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2252 			goto out;
2253 		}
2254 	}
2255 
2256 	/* check for WM_F_WOL */
2257 	switch (sc->sc_type) {
2258 	case WM_T_82542_2_0:
2259 	case WM_T_82542_2_1:
2260 	case WM_T_82543:
2261 		/* dummy? */
2262 		eeprom_data = 0;
2263 		apme_mask = NVM_CFG3_APME;
2264 		break;
2265 	case WM_T_82544:
2266 		apme_mask = NVM_CFG2_82544_APM_EN;
2267 		eeprom_data = cfg2;
2268 		break;
2269 	case WM_T_82546:
2270 	case WM_T_82546_3:
2271 	case WM_T_82571:
2272 	case WM_T_82572:
2273 	case WM_T_82573:
2274 	case WM_T_82574:
2275 	case WM_T_82583:
2276 	case WM_T_80003:
2277 	default:
2278 		apme_mask = NVM_CFG3_APME;
2279 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2280 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2281 		break;
2282 	case WM_T_82575:
2283 	case WM_T_82576:
2284 	case WM_T_82580:
2285 	case WM_T_I350:
2286 	case WM_T_I354: /* XXX ok? */
2287 	case WM_T_ICH8:
2288 	case WM_T_ICH9:
2289 	case WM_T_ICH10:
2290 	case WM_T_PCH:
2291 	case WM_T_PCH2:
2292 	case WM_T_PCH_LPT:
2293 	case WM_T_PCH_SPT:
2294 		/* XXX The funcid should be checked on some devices */
2295 		apme_mask = WUC_APME;
2296 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2297 		break;
2298 	}
2299 
2300 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2301 	if ((eeprom_data & apme_mask) != 0)
2302 		sc->sc_flags |= WM_F_WOL;
2303 #ifdef WM_DEBUG
2304 	if ((sc->sc_flags & WM_F_WOL) != 0)
2305 		printf("WOL\n");
2306 #endif
2307 
2308 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2309 		/* Check NVM for autonegotiation */
2310 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2311 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2312 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2313 		}
2314 	}
2315 
2316 	/*
2317 	 * XXX need special handling for some multiple port cards
2318 	 * to disable a paticular port.
2319 	 */
2320 
2321 	if (sc->sc_type >= WM_T_82544) {
2322 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2323 		if (pn != NULL) {
2324 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2325 			swdpin = (uint16_t) prop_number_integer_value(pn);
2326 		} else {
2327 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2328 				aprint_error_dev(sc->sc_dev,
2329 				    "unable to read SWDPIN\n");
2330 				goto out;
2331 			}
2332 		}
2333 	}
2334 
2335 	if (cfg1 & NVM_CFG1_ILOS)
2336 		sc->sc_ctrl |= CTRL_ILOS;
2337 
2338 	/*
2339 	 * XXX
2340 	 * This code isn't correct because pin 2 and 3 are located
2341 	 * in different position on newer chips. Check all datasheet.
2342 	 *
2343 	 * Until resolve this problem, check if a chip < 82580
2344 	 */
2345 	if (sc->sc_type <= WM_T_82580) {
2346 		if (sc->sc_type >= WM_T_82544) {
2347 			sc->sc_ctrl |=
2348 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2349 			    CTRL_SWDPIO_SHIFT;
2350 			sc->sc_ctrl |=
2351 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2352 			    CTRL_SWDPINS_SHIFT;
2353 		} else {
2354 			sc->sc_ctrl |=
2355 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2356 			    CTRL_SWDPIO_SHIFT;
2357 		}
2358 	}
2359 
2360 	/* XXX For other than 82580? */
2361 	if (sc->sc_type == WM_T_82580) {
2362 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2363 		if (nvmword & __BIT(13))
2364 			sc->sc_ctrl |= CTRL_ILOS;
2365 	}
2366 
2367 #if 0
2368 	if (sc->sc_type >= WM_T_82544) {
2369 		if (cfg1 & NVM_CFG1_IPS0)
2370 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2371 		if (cfg1 & NVM_CFG1_IPS1)
2372 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2373 		sc->sc_ctrl_ext |=
2374 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2375 		    CTRL_EXT_SWDPIO_SHIFT;
2376 		sc->sc_ctrl_ext |=
2377 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2378 		    CTRL_EXT_SWDPINS_SHIFT;
2379 	} else {
2380 		sc->sc_ctrl_ext |=
2381 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2382 		    CTRL_EXT_SWDPIO_SHIFT;
2383 	}
2384 #endif
2385 
2386 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2387 #if 0
2388 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2389 #endif
2390 
2391 	if (sc->sc_type == WM_T_PCH) {
2392 		uint16_t val;
2393 
2394 		/* Save the NVM K1 bit setting */
2395 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2396 
2397 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2398 			sc->sc_nvm_k1_enabled = 1;
2399 		else
2400 			sc->sc_nvm_k1_enabled = 0;
2401 	}
2402 
2403 	/*
2404 	 * Determine if we're TBI,GMII or SGMII mode, and initialize the
2405 	 * media structures accordingly.
2406 	 */
2407 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2408 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2409 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2410 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_82573
2411 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2412 		/* STATUS_TBIMODE reserved/reused, can't rely on it */
2413 		wm_gmii_mediainit(sc, wmp->wmp_product);
2414 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2415 	    || (sc->sc_type ==WM_T_82580) || (sc->sc_type ==WM_T_I350)
2416 	    || (sc->sc_type ==WM_T_I354) || (sc->sc_type ==WM_T_I210)
2417 	    || (sc->sc_type ==WM_T_I211)) {
2418 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2419 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2420 		switch (link_mode) {
2421 		case CTRL_EXT_LINK_MODE_1000KX:
2422 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2423 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2424 			break;
2425 		case CTRL_EXT_LINK_MODE_SGMII:
2426 			if (wm_sgmii_uses_mdio(sc)) {
2427 				aprint_verbose_dev(sc->sc_dev,
2428 				    "SGMII(MDIO)\n");
2429 				sc->sc_flags |= WM_F_SGMII;
2430 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2431 				break;
2432 			}
2433 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2434 			/*FALLTHROUGH*/
2435 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2436 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2437 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2438 				if (link_mode
2439 				    == CTRL_EXT_LINK_MODE_SGMII) {
2440 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2441 					sc->sc_flags |= WM_F_SGMII;
2442 				} else {
2443 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2444 					aprint_verbose_dev(sc->sc_dev,
2445 					    "SERDES\n");
2446 				}
2447 				break;
2448 			}
2449 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2450 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2451 
2452 			/* Change current link mode setting */
2453 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2454 			switch (sc->sc_mediatype) {
2455 			case WM_MEDIATYPE_COPPER:
2456 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2457 				break;
2458 			case WM_MEDIATYPE_SERDES:
2459 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2460 				break;
2461 			default:
2462 				break;
2463 			}
2464 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2465 			break;
2466 		case CTRL_EXT_LINK_MODE_GMII:
2467 		default:
2468 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2469 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2470 			break;
2471 		}
2472 
2473 		reg &= ~CTRL_EXT_I2C_ENA;
2474 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2475 			reg |= CTRL_EXT_I2C_ENA;
2476 		else
2477 			reg &= ~CTRL_EXT_I2C_ENA;
2478 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2479 
2480 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2481 			wm_gmii_mediainit(sc, wmp->wmp_product);
2482 		else
2483 			wm_tbi_mediainit(sc);
2484 	} else if (sc->sc_type < WM_T_82543 ||
2485 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2486 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2487 			aprint_error_dev(sc->sc_dev,
2488 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2489 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2490 		}
2491 		wm_tbi_mediainit(sc);
2492 	} else {
2493 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2494 			aprint_error_dev(sc->sc_dev,
2495 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2496 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2497 		}
2498 		wm_gmii_mediainit(sc, wmp->wmp_product);
2499 	}
2500 
2501 	ifp = &sc->sc_ethercom.ec_if;
2502 	xname = device_xname(sc->sc_dev);
2503 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2504 	ifp->if_softc = sc;
2505 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2506 	ifp->if_extflags = IFEF_START_MPSAFE;
2507 	ifp->if_ioctl = wm_ioctl;
2508 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2509 		ifp->if_start = wm_nq_start;
2510 		if (sc->sc_nqueues > 1) {
2511 			ifp->if_transmit = wm_nq_transmit;
2512 			deferred_start_func = wm_deferred_start;
2513 		}
2514 	} else {
2515 		ifp->if_start = wm_start;
2516 		if (sc->sc_nqueues > 1) {
2517 			ifp->if_transmit = wm_transmit;
2518 			deferred_start_func = wm_deferred_start;
2519 		}
2520 	}
2521 	ifp->if_watchdog = wm_watchdog;
2522 	ifp->if_init = wm_init;
2523 	ifp->if_stop = wm_stop;
2524 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2525 	IFQ_SET_READY(&ifp->if_snd);
2526 
2527 	/* Check for jumbo frame */
2528 	switch (sc->sc_type) {
2529 	case WM_T_82573:
2530 		/* XXX limited to 9234 if ASPM is disabled */
2531 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2532 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2533 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2534 		break;
2535 	case WM_T_82571:
2536 	case WM_T_82572:
2537 	case WM_T_82574:
2538 	case WM_T_82575:
2539 	case WM_T_82576:
2540 	case WM_T_82580:
2541 	case WM_T_I350:
2542 	case WM_T_I354: /* XXXX ok? */
2543 	case WM_T_I210:
2544 	case WM_T_I211:
2545 	case WM_T_80003:
2546 	case WM_T_ICH9:
2547 	case WM_T_ICH10:
2548 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2549 	case WM_T_PCH_LPT:
2550 	case WM_T_PCH_SPT:
2551 		/* XXX limited to 9234 */
2552 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2553 		break;
2554 	case WM_T_PCH:
2555 		/* XXX limited to 4096 */
2556 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2557 		break;
2558 	case WM_T_82542_2_0:
2559 	case WM_T_82542_2_1:
2560 	case WM_T_82583:
2561 	case WM_T_ICH8:
2562 		/* No support for jumbo frame */
2563 		break;
2564 	default:
2565 		/* ETHER_MAX_LEN_JUMBO */
2566 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2567 		break;
2568 	}
2569 
2570 	/* If we're a i82543 or greater, we can support VLANs. */
2571 	if (sc->sc_type >= WM_T_82543)
2572 		sc->sc_ethercom.ec_capabilities |=
2573 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2574 
2575 	/*
2576 	 * We can perform TCPv4 and UDPv4 checkums in-bound.  Only
2577 	 * on i82543 and later.
2578 	 */
2579 	if (sc->sc_type >= WM_T_82543) {
2580 		ifp->if_capabilities |=
2581 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2582 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2583 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2584 		    IFCAP_CSUM_TCPv6_Tx |
2585 		    IFCAP_CSUM_UDPv6_Tx;
2586 	}
2587 
2588 	/*
2589 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2590 	 *
2591 	 *	82541GI (8086:1076) ... no
2592 	 *	82572EI (8086:10b9) ... yes
2593 	 */
2594 	if (sc->sc_type >= WM_T_82571) {
2595 		ifp->if_capabilities |=
2596 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2597 	}
2598 
2599 	/*
2600 	 * If we're a i82544 or greater (except i82547), we can do
2601 	 * TCP segmentation offload.
2602 	 */
2603 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2604 		ifp->if_capabilities |= IFCAP_TSOv4;
2605 	}
2606 
2607 	if (sc->sc_type >= WM_T_82571) {
2608 		ifp->if_capabilities |= IFCAP_TSOv6;
2609 	}
2610 
2611 #ifdef WM_MPSAFE
2612 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2613 #else
2614 	sc->sc_core_lock = NULL;
2615 #endif
2616 
2617 	/* Attach the interface. */
2618 	if_initialize(ifp);
2619 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2620 	if_deferred_start_init(ifp, deferred_start_func);
2621 	ether_ifattach(ifp, enaddr);
2622 	if_register(ifp);
2623 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2624 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2625 			  RND_FLAG_DEFAULT);
2626 
2627 #ifdef WM_EVENT_COUNTERS
2628 	/* Attach event counters. */
2629 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2630 	    NULL, xname, "linkintr");
2631 
2632 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2633 	    NULL, xname, "tx_xoff");
2634 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2635 	    NULL, xname, "tx_xon");
2636 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2637 	    NULL, xname, "rx_xoff");
2638 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2639 	    NULL, xname, "rx_xon");
2640 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2641 	    NULL, xname, "rx_macctl");
2642 #endif /* WM_EVENT_COUNTERS */
2643 
2644 	if (pmf_device_register(self, wm_suspend, wm_resume))
2645 		pmf_class_network_register(self, ifp);
2646 	else
2647 		aprint_error_dev(self, "couldn't establish power handler\n");
2648 
2649 	sc->sc_flags |= WM_F_ATTACHED;
2650  out:
2651 	return;
2652 }
2653 
2654 /* The detach function (ca_detach) */
2655 static int
2656 wm_detach(device_t self, int flags __unused)
2657 {
2658 	struct wm_softc *sc = device_private(self);
2659 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2660 	int i;
2661 
2662 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2663 		return 0;
2664 
2665 	/* Stop the interface. Callouts are stopped in it. */
2666 	wm_stop(ifp, 1);
2667 
2668 	pmf_device_deregister(self);
2669 
2670 	/* Tell the firmware about the release */
2671 	WM_CORE_LOCK(sc);
2672 	wm_release_manageability(sc);
2673 	wm_release_hw_control(sc);
2674 	wm_enable_wakeup(sc);
2675 	WM_CORE_UNLOCK(sc);
2676 
2677 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2678 
2679 	/* Delete all remaining media. */
2680 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2681 
2682 	ether_ifdetach(ifp);
2683 	if_detach(ifp);
2684 	if_percpuq_destroy(sc->sc_ipq);
2685 
2686 	/* Unload RX dmamaps and free mbufs */
2687 	for (i = 0; i < sc->sc_nqueues; i++) {
2688 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2689 		mutex_enter(rxq->rxq_lock);
2690 		wm_rxdrain(rxq);
2691 		mutex_exit(rxq->rxq_lock);
2692 	}
2693 	/* Must unlock here */
2694 
2695 	/* Disestablish the interrupt handler */
2696 	for (i = 0; i < sc->sc_nintrs; i++) {
2697 		if (sc->sc_ihs[i] != NULL) {
2698 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2699 			sc->sc_ihs[i] = NULL;
2700 		}
2701 	}
2702 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2703 
2704 	wm_free_txrx_queues(sc);
2705 
2706 	/* Unmap the registers */
2707 	if (sc->sc_ss) {
2708 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2709 		sc->sc_ss = 0;
2710 	}
2711 	if (sc->sc_ios) {
2712 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2713 		sc->sc_ios = 0;
2714 	}
2715 	if (sc->sc_flashs) {
2716 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2717 		sc->sc_flashs = 0;
2718 	}
2719 
2720 	if (sc->sc_core_lock)
2721 		mutex_obj_free(sc->sc_core_lock);
2722 	if (sc->sc_ich_phymtx)
2723 		mutex_obj_free(sc->sc_ich_phymtx);
2724 	if (sc->sc_ich_nvmmtx)
2725 		mutex_obj_free(sc->sc_ich_nvmmtx);
2726 
2727 	return 0;
2728 }
2729 
2730 static bool
2731 wm_suspend(device_t self, const pmf_qual_t *qual)
2732 {
2733 	struct wm_softc *sc = device_private(self);
2734 
2735 	wm_release_manageability(sc);
2736 	wm_release_hw_control(sc);
2737 	wm_enable_wakeup(sc);
2738 
2739 	return true;
2740 }
2741 
2742 static bool
2743 wm_resume(device_t self, const pmf_qual_t *qual)
2744 {
2745 	struct wm_softc *sc = device_private(self);
2746 
2747 	wm_init_manageability(sc);
2748 
2749 	return true;
2750 }
2751 
2752 /*
2753  * wm_watchdog:		[ifnet interface function]
2754  *
2755  *	Watchdog timer handler.
2756  */
2757 static void
2758 wm_watchdog(struct ifnet *ifp)
2759 {
2760 	int qid;
2761 	struct wm_softc *sc = ifp->if_softc;
2762 
2763 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2764 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2765 
2766 		wm_watchdog_txq(ifp, txq);
2767 	}
2768 
2769 	/* Reset the interface. */
2770 	(void) wm_init(ifp);
2771 
2772 	/*
2773 	 * There are still some upper layer processing which call
2774 	 * ifp->if_start(). e.g. ALTQ
2775 	 */
2776 	/* Try to get more packets going. */
2777 	ifp->if_start(ifp);
2778 }
2779 
2780 static void
2781 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq)
2782 {
2783 	struct wm_softc *sc = ifp->if_softc;
2784 
2785 	/*
2786 	 * Since we're using delayed interrupts, sweep up
2787 	 * before we report an error.
2788 	 */
2789 	mutex_enter(txq->txq_lock);
2790 	wm_txeof(sc, txq);
2791 	mutex_exit(txq->txq_lock);
2792 
2793 	if (txq->txq_free != WM_NTXDESC(txq)) {
2794 #ifdef WM_DEBUG
2795 		int i, j;
2796 		struct wm_txsoft *txs;
2797 #endif
2798 		log(LOG_ERR,
2799 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
2800 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
2801 		    txq->txq_next);
2802 		ifp->if_oerrors++;
2803 #ifdef WM_DEBUG
2804 		for (i = txq->txq_sdirty; i != txq->txq_snext ;
2805 		    i = WM_NEXTTXS(txq, i)) {
2806 		    txs = &txq->txq_soft[i];
2807 		    printf("txs %d tx %d -> %d\n",
2808 			i, txs->txs_firstdesc, txs->txs_lastdesc);
2809 		    for (j = txs->txs_firstdesc; ;
2810 			j = WM_NEXTTX(txq, j)) {
2811 			printf("\tdesc %d: 0x%" PRIx64 "\n", j,
2812 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
2813 			printf("\t %#08x%08x\n",
2814 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
2815 			    txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
2816 			if (j == txs->txs_lastdesc)
2817 				break;
2818 			}
2819 		}
2820 #endif
2821 	}
2822 }
2823 
2824 /*
2825  * wm_tick:
2826  *
2827  *	One second timer, used to check link status, sweep up
2828  *	completed transmit jobs, etc.
2829  */
2830 static void
2831 wm_tick(void *arg)
2832 {
2833 	struct wm_softc *sc = arg;
2834 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2835 #ifndef WM_MPSAFE
2836 	int s = splnet();
2837 #endif
2838 
2839 	WM_CORE_LOCK(sc);
2840 
2841 	if (sc->sc_core_stopping)
2842 		goto out;
2843 
2844 	if (sc->sc_type >= WM_T_82542_2_1) {
2845 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
2846 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
2847 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
2848 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
2849 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
2850 	}
2851 
2852 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
2853 	ifp->if_ierrors += 0ULL + /* ensure quad_t */
2854 	    + CSR_READ(sc, WMREG_CRCERRS)
2855 	    + CSR_READ(sc, WMREG_ALGNERRC)
2856 	    + CSR_READ(sc, WMREG_SYMERRC)
2857 	    + CSR_READ(sc, WMREG_RXERRC)
2858 	    + CSR_READ(sc, WMREG_SEC)
2859 	    + CSR_READ(sc, WMREG_CEXTERR)
2860 	    + CSR_READ(sc, WMREG_RLEC);
2861 	/*
2862 	 * WMREG_RNBC is incremented when there is no available buffers in host
2863 	 * memory. It does not mean the number of dropped packet. Because
2864 	 * ethernet controller can receive packets in such case if there is
2865 	 * space in phy's FIFO.
2866 	 *
2867 	 * If you want to know the nubmer of WMREG_RMBC, you should use such as
2868 	 * own EVCNT instead of if_iqdrops.
2869 	 */
2870 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
2871 
2872 	if (sc->sc_flags & WM_F_HAS_MII)
2873 		mii_tick(&sc->sc_mii);
2874 	else if ((sc->sc_type >= WM_T_82575)
2875 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
2876 		wm_serdes_tick(sc);
2877 	else
2878 		wm_tbi_tick(sc);
2879 
2880 out:
2881 	WM_CORE_UNLOCK(sc);
2882 #ifndef WM_MPSAFE
2883 	splx(s);
2884 #endif
2885 
2886 	if (!sc->sc_core_stopping)
2887 		callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
2888 }
2889 
2890 static int
2891 wm_ifflags_cb(struct ethercom *ec)
2892 {
2893 	struct ifnet *ifp = &ec->ec_if;
2894 	struct wm_softc *sc = ifp->if_softc;
2895 	int rc = 0;
2896 
2897 	WM_CORE_LOCK(sc);
2898 
2899 	int change = ifp->if_flags ^ sc->sc_if_flags;
2900 	sc->sc_if_flags = ifp->if_flags;
2901 
2902 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2903 		rc = ENETRESET;
2904 		goto out;
2905 	}
2906 
2907 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
2908 		wm_set_filter(sc);
2909 
2910 	wm_set_vlan(sc);
2911 
2912 out:
2913 	WM_CORE_UNLOCK(sc);
2914 
2915 	return rc;
2916 }
2917 
2918 /*
2919  * wm_ioctl:		[ifnet interface function]
2920  *
2921  *	Handle control requests from the operator.
2922  */
2923 static int
2924 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
2925 {
2926 	struct wm_softc *sc = ifp->if_softc;
2927 	struct ifreq *ifr = (struct ifreq *) data;
2928 	struct ifaddr *ifa = (struct ifaddr *)data;
2929 	struct sockaddr_dl *sdl;
2930 	int s, error;
2931 
2932 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
2933 		device_xname(sc->sc_dev), __func__));
2934 
2935 #ifndef WM_MPSAFE
2936 	s = splnet();
2937 #endif
2938 	switch (cmd) {
2939 	case SIOCSIFMEDIA:
2940 	case SIOCGIFMEDIA:
2941 		WM_CORE_LOCK(sc);
2942 		/* Flow control requires full-duplex mode. */
2943 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
2944 		    (ifr->ifr_media & IFM_FDX) == 0)
2945 			ifr->ifr_media &= ~IFM_ETH_FMASK;
2946 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
2947 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
2948 				/* We can do both TXPAUSE and RXPAUSE. */
2949 				ifr->ifr_media |=
2950 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2951 			}
2952 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
2953 		}
2954 		WM_CORE_UNLOCK(sc);
2955 #ifdef WM_MPSAFE
2956 		s = splnet();
2957 #endif
2958 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
2959 #ifdef WM_MPSAFE
2960 		splx(s);
2961 #endif
2962 		break;
2963 	case SIOCINITIFADDR:
2964 		WM_CORE_LOCK(sc);
2965 		if (ifa->ifa_addr->sa_family == AF_LINK) {
2966 			sdl = satosdl(ifp->if_dl->ifa_addr);
2967 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
2968 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
2969 			/* unicast address is first multicast entry */
2970 			wm_set_filter(sc);
2971 			error = 0;
2972 			WM_CORE_UNLOCK(sc);
2973 			break;
2974 		}
2975 		WM_CORE_UNLOCK(sc);
2976 		/*FALLTHROUGH*/
2977 	default:
2978 #ifdef WM_MPSAFE
2979 		s = splnet();
2980 #endif
2981 		/* It may call wm_start, so unlock here */
2982 		error = ether_ioctl(ifp, cmd, data);
2983 #ifdef WM_MPSAFE
2984 		splx(s);
2985 #endif
2986 		if (error != ENETRESET)
2987 			break;
2988 
2989 		error = 0;
2990 
2991 		if (cmd == SIOCSIFCAP) {
2992 			error = (*ifp->if_init)(ifp);
2993 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
2994 			;
2995 		else if (ifp->if_flags & IFF_RUNNING) {
2996 			/*
2997 			 * Multicast list has changed; set the hardware filter
2998 			 * accordingly.
2999 			 */
3000 			WM_CORE_LOCK(sc);
3001 			wm_set_filter(sc);
3002 			WM_CORE_UNLOCK(sc);
3003 		}
3004 		break;
3005 	}
3006 
3007 #ifndef WM_MPSAFE
3008 	splx(s);
3009 #endif
3010 	return error;
3011 }
3012 
3013 /* MAC address related */
3014 
3015 /*
3016  * Get the offset of MAC address and return it.
3017  * If error occured, use offset 0.
3018  */
3019 static uint16_t
3020 wm_check_alt_mac_addr(struct wm_softc *sc)
3021 {
3022 	uint16_t myea[ETHER_ADDR_LEN / 2];
3023 	uint16_t offset = NVM_OFF_MACADDR;
3024 
3025 	/* Try to read alternative MAC address pointer */
3026 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3027 		return 0;
3028 
3029 	/* Check pointer if it's valid or not. */
3030 	if ((offset == 0x0000) || (offset == 0xffff))
3031 		return 0;
3032 
3033 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3034 	/*
3035 	 * Check whether alternative MAC address is valid or not.
3036 	 * Some cards have non 0xffff pointer but those don't use
3037 	 * alternative MAC address in reality.
3038 	 *
3039 	 * Check whether the broadcast bit is set or not.
3040 	 */
3041 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3042 		if (((myea[0] & 0xff) & 0x01) == 0)
3043 			return offset; /* Found */
3044 
3045 	/* Not found */
3046 	return 0;
3047 }
3048 
3049 static int
3050 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3051 {
3052 	uint16_t myea[ETHER_ADDR_LEN / 2];
3053 	uint16_t offset = NVM_OFF_MACADDR;
3054 	int do_invert = 0;
3055 
3056 	switch (sc->sc_type) {
3057 	case WM_T_82580:
3058 	case WM_T_I350:
3059 	case WM_T_I354:
3060 		/* EEPROM Top Level Partitioning */
3061 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3062 		break;
3063 	case WM_T_82571:
3064 	case WM_T_82575:
3065 	case WM_T_82576:
3066 	case WM_T_80003:
3067 	case WM_T_I210:
3068 	case WM_T_I211:
3069 		offset = wm_check_alt_mac_addr(sc);
3070 		if (offset == 0)
3071 			if ((sc->sc_funcid & 0x01) == 1)
3072 				do_invert = 1;
3073 		break;
3074 	default:
3075 		if ((sc->sc_funcid & 0x01) == 1)
3076 			do_invert = 1;
3077 		break;
3078 	}
3079 
3080 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3081 		goto bad;
3082 
3083 	enaddr[0] = myea[0] & 0xff;
3084 	enaddr[1] = myea[0] >> 8;
3085 	enaddr[2] = myea[1] & 0xff;
3086 	enaddr[3] = myea[1] >> 8;
3087 	enaddr[4] = myea[2] & 0xff;
3088 	enaddr[5] = myea[2] >> 8;
3089 
3090 	/*
3091 	 * Toggle the LSB of the MAC address on the second port
3092 	 * of some dual port cards.
3093 	 */
3094 	if (do_invert != 0)
3095 		enaddr[5] ^= 1;
3096 
3097 	return 0;
3098 
3099  bad:
3100 	return -1;
3101 }
3102 
3103 /*
3104  * wm_set_ral:
3105  *
3106  *	Set an entery in the receive address list.
3107  */
3108 static void
3109 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3110 {
3111 	uint32_t ral_lo, ral_hi;
3112 
3113 	if (enaddr != NULL) {
3114 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3115 		    (enaddr[3] << 24);
3116 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3117 		ral_hi |= RAL_AV;
3118 	} else {
3119 		ral_lo = 0;
3120 		ral_hi = 0;
3121 	}
3122 
3123 	if (sc->sc_type >= WM_T_82544) {
3124 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_CORDOVA_RAL_BASE, idx),
3125 		    ral_lo);
3126 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_CORDOVA_RAL_BASE, idx),
3127 		    ral_hi);
3128 	} else {
3129 		CSR_WRITE(sc, WMREG_RAL_LO(WMREG_RAL_BASE, idx), ral_lo);
3130 		CSR_WRITE(sc, WMREG_RAL_HI(WMREG_RAL_BASE, idx), ral_hi);
3131 	}
3132 }
3133 
3134 /*
3135  * wm_mchash:
3136  *
3137  *	Compute the hash of the multicast address for the 4096-bit
3138  *	multicast filter.
3139  */
3140 static uint32_t
3141 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3142 {
3143 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3144 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3145 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3146 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3147 	uint32_t hash;
3148 
3149 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3150 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3151 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3152 	    || (sc->sc_type == WM_T_PCH_SPT)) {
3153 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3154 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3155 		return (hash & 0x3ff);
3156 	}
3157 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3158 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3159 
3160 	return (hash & 0xfff);
3161 }
3162 
3163 /*
3164  * wm_set_filter:
3165  *
3166  *	Set up the receive filter.
3167  */
3168 static void
3169 wm_set_filter(struct wm_softc *sc)
3170 {
3171 	struct ethercom *ec = &sc->sc_ethercom;
3172 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3173 	struct ether_multi *enm;
3174 	struct ether_multistep step;
3175 	bus_addr_t mta_reg;
3176 	uint32_t hash, reg, bit;
3177 	int i, size, ralmax;
3178 
3179 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3180 		device_xname(sc->sc_dev), __func__));
3181 
3182 	if (sc->sc_type >= WM_T_82544)
3183 		mta_reg = WMREG_CORDOVA_MTA;
3184 	else
3185 		mta_reg = WMREG_MTA;
3186 
3187 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3188 
3189 	if (ifp->if_flags & IFF_BROADCAST)
3190 		sc->sc_rctl |= RCTL_BAM;
3191 	if (ifp->if_flags & IFF_PROMISC) {
3192 		sc->sc_rctl |= RCTL_UPE;
3193 		goto allmulti;
3194 	}
3195 
3196 	/*
3197 	 * Set the station address in the first RAL slot, and
3198 	 * clear the remaining slots.
3199 	 */
3200 	if (sc->sc_type == WM_T_ICH8)
3201 		size = WM_RAL_TABSIZE_ICH8 -1;
3202 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3203 	    || (sc->sc_type == WM_T_PCH))
3204 		size = WM_RAL_TABSIZE_ICH8;
3205 	else if (sc->sc_type == WM_T_PCH2)
3206 		size = WM_RAL_TABSIZE_PCH2;
3207 	else if ((sc->sc_type == WM_T_PCH_LPT) ||(sc->sc_type == WM_T_PCH_SPT))
3208 		size = WM_RAL_TABSIZE_PCH_LPT;
3209 	else if (sc->sc_type == WM_T_82575)
3210 		size = WM_RAL_TABSIZE_82575;
3211 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3212 		size = WM_RAL_TABSIZE_82576;
3213 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3214 		size = WM_RAL_TABSIZE_I350;
3215 	else
3216 		size = WM_RAL_TABSIZE;
3217 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3218 
3219 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
3220 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3221 		switch (i) {
3222 		case 0:
3223 			/* We can use all entries */
3224 			ralmax = size;
3225 			break;
3226 		case 1:
3227 			/* Only RAR[0] */
3228 			ralmax = 1;
3229 			break;
3230 		default:
3231 			/* available SHRA + RAR[0] */
3232 			ralmax = i + 1;
3233 		}
3234 	} else
3235 		ralmax = size;
3236 	for (i = 1; i < size; i++) {
3237 		if (i < ralmax)
3238 			wm_set_ral(sc, NULL, i);
3239 	}
3240 
3241 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3242 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3243 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3244 	    || (sc->sc_type == WM_T_PCH_SPT))
3245 		size = WM_ICH8_MC_TABSIZE;
3246 	else
3247 		size = WM_MC_TABSIZE;
3248 	/* Clear out the multicast table. */
3249 	for (i = 0; i < size; i++)
3250 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3251 
3252 	ETHER_LOCK(ec);
3253 	ETHER_FIRST_MULTI(step, ec, enm);
3254 	while (enm != NULL) {
3255 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3256 			ETHER_UNLOCK(ec);
3257 			/*
3258 			 * We must listen to a range of multicast addresses.
3259 			 * For now, just accept all multicasts, rather than
3260 			 * trying to set only those filter bits needed to match
3261 			 * the range.  (At this time, the only use of address
3262 			 * ranges is for IP multicast routing, for which the
3263 			 * range is big enough to require all bits set.)
3264 			 */
3265 			goto allmulti;
3266 		}
3267 
3268 		hash = wm_mchash(sc, enm->enm_addrlo);
3269 
3270 		reg = (hash >> 5);
3271 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3272 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3273 		    || (sc->sc_type == WM_T_PCH2)
3274 		    || (sc->sc_type == WM_T_PCH_LPT)
3275 		    || (sc->sc_type == WM_T_PCH_SPT))
3276 			reg &= 0x1f;
3277 		else
3278 			reg &= 0x7f;
3279 		bit = hash & 0x1f;
3280 
3281 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3282 		hash |= 1U << bit;
3283 
3284 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3285 			/*
3286 			 * 82544 Errata 9: Certain register cannot be written
3287 			 * with particular alignments in PCI-X bus operation
3288 			 * (FCAH, MTA and VFTA).
3289 			 */
3290 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3291 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3292 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3293 		} else
3294 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3295 
3296 		ETHER_NEXT_MULTI(step, enm);
3297 	}
3298 	ETHER_UNLOCK(ec);
3299 
3300 	ifp->if_flags &= ~IFF_ALLMULTI;
3301 	goto setit;
3302 
3303  allmulti:
3304 	ifp->if_flags |= IFF_ALLMULTI;
3305 	sc->sc_rctl |= RCTL_MPE;
3306 
3307  setit:
3308 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3309 }
3310 
3311 /* Reset and init related */
3312 
3313 static void
3314 wm_set_vlan(struct wm_softc *sc)
3315 {
3316 
3317 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3318 		device_xname(sc->sc_dev), __func__));
3319 
3320 	/* Deal with VLAN enables. */
3321 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3322 		sc->sc_ctrl |= CTRL_VME;
3323 	else
3324 		sc->sc_ctrl &= ~CTRL_VME;
3325 
3326 	/* Write the control registers. */
3327 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3328 }
3329 
3330 static void
3331 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3332 {
3333 	uint32_t gcr;
3334 	pcireg_t ctrl2;
3335 
3336 	gcr = CSR_READ(sc, WMREG_GCR);
3337 
3338 	/* Only take action if timeout value is defaulted to 0 */
3339 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3340 		goto out;
3341 
3342 	if ((gcr & GCR_CAP_VER2) == 0) {
3343 		gcr |= GCR_CMPL_TMOUT_10MS;
3344 		goto out;
3345 	}
3346 
3347 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3348 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3349 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3350 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3351 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3352 
3353 out:
3354 	/* Disable completion timeout resend */
3355 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3356 
3357 	CSR_WRITE(sc, WMREG_GCR, gcr);
3358 }
3359 
3360 void
3361 wm_get_auto_rd_done(struct wm_softc *sc)
3362 {
3363 	int i;
3364 
3365 	/* wait for eeprom to reload */
3366 	switch (sc->sc_type) {
3367 	case WM_T_82571:
3368 	case WM_T_82572:
3369 	case WM_T_82573:
3370 	case WM_T_82574:
3371 	case WM_T_82583:
3372 	case WM_T_82575:
3373 	case WM_T_82576:
3374 	case WM_T_82580:
3375 	case WM_T_I350:
3376 	case WM_T_I354:
3377 	case WM_T_I210:
3378 	case WM_T_I211:
3379 	case WM_T_80003:
3380 	case WM_T_ICH8:
3381 	case WM_T_ICH9:
3382 		for (i = 0; i < 10; i++) {
3383 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3384 				break;
3385 			delay(1000);
3386 		}
3387 		if (i == 10) {
3388 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3389 			    "complete\n", device_xname(sc->sc_dev));
3390 		}
3391 		break;
3392 	default:
3393 		break;
3394 	}
3395 }
3396 
3397 void
3398 wm_lan_init_done(struct wm_softc *sc)
3399 {
3400 	uint32_t reg = 0;
3401 	int i;
3402 
3403 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3404 		device_xname(sc->sc_dev), __func__));
3405 
3406 	/* Wait for eeprom to reload */
3407 	switch (sc->sc_type) {
3408 	case WM_T_ICH10:
3409 	case WM_T_PCH:
3410 	case WM_T_PCH2:
3411 	case WM_T_PCH_LPT:
3412 	case WM_T_PCH_SPT:
3413 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3414 			reg = CSR_READ(sc, WMREG_STATUS);
3415 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3416 				break;
3417 			delay(100);
3418 		}
3419 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3420 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3421 			    "complete\n", device_xname(sc->sc_dev), __func__);
3422 		}
3423 		break;
3424 	default:
3425 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3426 		    __func__);
3427 		break;
3428 	}
3429 
3430 	reg &= ~STATUS_LAN_INIT_DONE;
3431 	CSR_WRITE(sc, WMREG_STATUS, reg);
3432 }
3433 
3434 void
3435 wm_get_cfg_done(struct wm_softc *sc)
3436 {
3437 	int mask;
3438 	uint32_t reg;
3439 	int i;
3440 
3441 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3442 		device_xname(sc->sc_dev), __func__));
3443 
3444 	/* Wait for eeprom to reload */
3445 	switch (sc->sc_type) {
3446 	case WM_T_82542_2_0:
3447 	case WM_T_82542_2_1:
3448 		/* null */
3449 		break;
3450 	case WM_T_82543:
3451 	case WM_T_82544:
3452 	case WM_T_82540:
3453 	case WM_T_82545:
3454 	case WM_T_82545_3:
3455 	case WM_T_82546:
3456 	case WM_T_82546_3:
3457 	case WM_T_82541:
3458 	case WM_T_82541_2:
3459 	case WM_T_82547:
3460 	case WM_T_82547_2:
3461 	case WM_T_82573:
3462 	case WM_T_82574:
3463 	case WM_T_82583:
3464 		/* generic */
3465 		delay(10*1000);
3466 		break;
3467 	case WM_T_80003:
3468 	case WM_T_82571:
3469 	case WM_T_82572:
3470 	case WM_T_82575:
3471 	case WM_T_82576:
3472 	case WM_T_82580:
3473 	case WM_T_I350:
3474 	case WM_T_I354:
3475 	case WM_T_I210:
3476 	case WM_T_I211:
3477 		if (sc->sc_type == WM_T_82571) {
3478 			/* Only 82571 shares port 0 */
3479 			mask = EEMNGCTL_CFGDONE_0;
3480 		} else
3481 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3482 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3483 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3484 				break;
3485 			delay(1000);
3486 		}
3487 		if (i >= WM_PHY_CFG_TIMEOUT) {
3488 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3489 				device_xname(sc->sc_dev), __func__));
3490 		}
3491 		break;
3492 	case WM_T_ICH8:
3493 	case WM_T_ICH9:
3494 	case WM_T_ICH10:
3495 	case WM_T_PCH:
3496 	case WM_T_PCH2:
3497 	case WM_T_PCH_LPT:
3498 	case WM_T_PCH_SPT:
3499 		delay(10*1000);
3500 		if (sc->sc_type >= WM_T_ICH10)
3501 			wm_lan_init_done(sc);
3502 		else
3503 			wm_get_auto_rd_done(sc);
3504 
3505 		reg = CSR_READ(sc, WMREG_STATUS);
3506 		if ((reg & STATUS_PHYRA) != 0)
3507 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3508 		break;
3509 	default:
3510 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3511 		    __func__);
3512 		break;
3513 	}
3514 }
3515 
3516 /* Init hardware bits */
3517 void
3518 wm_initialize_hardware_bits(struct wm_softc *sc)
3519 {
3520 	uint32_t tarc0, tarc1, reg;
3521 
3522 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3523 		device_xname(sc->sc_dev), __func__));
3524 
3525 	/* For 82571 variant, 80003 and ICHs */
3526 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
3527 	    || (sc->sc_type >= WM_T_80003)) {
3528 
3529 		/* Transmit Descriptor Control 0 */
3530 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
3531 		reg |= TXDCTL_COUNT_DESC;
3532 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
3533 
3534 		/* Transmit Descriptor Control 1 */
3535 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
3536 		reg |= TXDCTL_COUNT_DESC;
3537 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
3538 
3539 		/* TARC0 */
3540 		tarc0 = CSR_READ(sc, WMREG_TARC0);
3541 		switch (sc->sc_type) {
3542 		case WM_T_82571:
3543 		case WM_T_82572:
3544 		case WM_T_82573:
3545 		case WM_T_82574:
3546 		case WM_T_82583:
3547 		case WM_T_80003:
3548 			/* Clear bits 30..27 */
3549 			tarc0 &= ~__BITS(30, 27);
3550 			break;
3551 		default:
3552 			break;
3553 		}
3554 
3555 		switch (sc->sc_type) {
3556 		case WM_T_82571:
3557 		case WM_T_82572:
3558 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
3559 
3560 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3561 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
3562 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
3563 			/* 8257[12] Errata No.7 */
3564 			tarc1 |= __BIT(22); /* TARC1 bits 22 */
3565 
3566 			/* TARC1 bit 28 */
3567 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3568 				tarc1 &= ~__BIT(28);
3569 			else
3570 				tarc1 |= __BIT(28);
3571 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3572 
3573 			/*
3574 			 * 8257[12] Errata No.13
3575 			 * Disable Dyamic Clock Gating.
3576 			 */
3577 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3578 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
3579 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3580 			break;
3581 		case WM_T_82573:
3582 		case WM_T_82574:
3583 		case WM_T_82583:
3584 			if ((sc->sc_type == WM_T_82574)
3585 			    || (sc->sc_type == WM_T_82583))
3586 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
3587 
3588 			/* Extended Device Control */
3589 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3590 			reg &= ~__BIT(23);	/* Clear bit 23 */
3591 			reg |= __BIT(22);	/* Set bit 22 */
3592 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3593 
3594 			/* Device Control */
3595 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
3596 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3597 
3598 			/* PCIe Control Register */
3599 			/*
3600 			 * 82573 Errata (unknown).
3601 			 *
3602 			 * 82574 Errata 25 and 82583 Errata 12
3603 			 * "Dropped Rx Packets":
3604 			 *   NVM Image Version 2.1.4 and newer has no this bug.
3605 			 */
3606 			reg = CSR_READ(sc, WMREG_GCR);
3607 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
3608 			CSR_WRITE(sc, WMREG_GCR, reg);
3609 
3610 			if ((sc->sc_type == WM_T_82574)
3611 			    || (sc->sc_type == WM_T_82583)) {
3612 				/*
3613 				 * Document says this bit must be set for
3614 				 * proper operation.
3615 				 */
3616 				reg = CSR_READ(sc, WMREG_GCR);
3617 				reg |= __BIT(22);
3618 				CSR_WRITE(sc, WMREG_GCR, reg);
3619 
3620 				/*
3621 				 * Apply workaround for hardware errata
3622 				 * documented in errata docs Fixes issue where
3623 				 * some error prone or unreliable PCIe
3624 				 * completions are occurring, particularly
3625 				 * with ASPM enabled. Without fix, issue can
3626 				 * cause Tx timeouts.
3627 				 */
3628 				reg = CSR_READ(sc, WMREG_GCR2);
3629 				reg |= __BIT(0);
3630 				CSR_WRITE(sc, WMREG_GCR2, reg);
3631 			}
3632 			break;
3633 		case WM_T_80003:
3634 			/* TARC0 */
3635 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
3636 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3637 				tarc0 &= ~__BIT(20); /* Clear bits 20 */
3638 
3639 			/* TARC1 bit 28 */
3640 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3641 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3642 				tarc1 &= ~__BIT(28);
3643 			else
3644 				tarc1 |= __BIT(28);
3645 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3646 			break;
3647 		case WM_T_ICH8:
3648 		case WM_T_ICH9:
3649 		case WM_T_ICH10:
3650 		case WM_T_PCH:
3651 		case WM_T_PCH2:
3652 		case WM_T_PCH_LPT:
3653 		case WM_T_PCH_SPT:
3654 			/* TARC0 */
3655 			if ((sc->sc_type == WM_T_ICH8)
3656 			    || (sc->sc_type == WM_T_PCH_SPT)) {
3657 				/* Set TARC0 bits 29 and 28 */
3658 				tarc0 |= __BITS(29, 28);
3659 			}
3660 			/* Set TARC0 bits 23,24,26,27 */
3661 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
3662 
3663 			/* CTRL_EXT */
3664 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
3665 			reg |= __BIT(22);	/* Set bit 22 */
3666 			/*
3667 			 * Enable PHY low-power state when MAC is at D3
3668 			 * w/o WoL
3669 			 */
3670 			if (sc->sc_type >= WM_T_PCH)
3671 				reg |= CTRL_EXT_PHYPDEN;
3672 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
3673 
3674 			/* TARC1 */
3675 			tarc1 = CSR_READ(sc, WMREG_TARC1);
3676 			/* bit 28 */
3677 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
3678 				tarc1 &= ~__BIT(28);
3679 			else
3680 				tarc1 |= __BIT(28);
3681 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
3682 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
3683 
3684 			/* Device Status */
3685 			if (sc->sc_type == WM_T_ICH8) {
3686 				reg = CSR_READ(sc, WMREG_STATUS);
3687 				reg &= ~__BIT(31);
3688 				CSR_WRITE(sc, WMREG_STATUS, reg);
3689 
3690 			}
3691 
3692 			/* IOSFPC */
3693 			if (sc->sc_type == WM_T_PCH_SPT) {
3694 				reg = CSR_READ(sc, WMREG_IOSFPC);
3695 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
3696 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
3697 			}
3698 			/*
3699 			 * Work-around descriptor data corruption issue during
3700 			 * NFS v2 UDP traffic, just disable the NFS filtering
3701 			 * capability.
3702 			 */
3703 			reg = CSR_READ(sc, WMREG_RFCTL);
3704 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
3705 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3706 			break;
3707 		default:
3708 			break;
3709 		}
3710 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
3711 
3712 		switch (sc->sc_type) {
3713 		/*
3714 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
3715 		 * Avoid RSS Hash Value bug.
3716 		 */
3717 		case WM_T_82571:
3718 		case WM_T_82572:
3719 		case WM_T_82573:
3720 		case WM_T_80003:
3721 		case WM_T_ICH8:
3722 			reg = CSR_READ(sc, WMREG_RFCTL);
3723 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
3724 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3725 			break;
3726 		/*
3727 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
3728 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
3729 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
3730 		 * Correctly by the Device"
3731 		 *
3732 		 * I354(C2000) Errata AVR53:
3733 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
3734 		 * Hang"
3735 		 */
3736 		case WM_T_82575:
3737 		case WM_T_82576:
3738 		case WM_T_82580:
3739 		case WM_T_I350:
3740 		case WM_T_I210:
3741 		case WM_T_I211:
3742 		case WM_T_I354:
3743 			reg = CSR_READ(sc, WMREG_RFCTL);
3744 			reg |= WMREG_RFCTL_IPV6EXDIS;
3745 			CSR_WRITE(sc, WMREG_RFCTL, reg);
3746 			break;
3747 		default:
3748 			break;
3749 		}
3750 	}
3751 }
3752 
3753 static uint32_t
3754 wm_rxpbs_adjust_82580(uint32_t val)
3755 {
3756 	uint32_t rv = 0;
3757 
3758 	if (val < __arraycount(wm_82580_rxpbs_table))
3759 		rv = wm_82580_rxpbs_table[val];
3760 
3761 	return rv;
3762 }
3763 
3764 /*
3765  * wm_reset_phy:
3766  *
3767  *	generic PHY reset function.
3768  *	Same as e1000_phy_hw_reset_generic()
3769  */
3770 static void
3771 wm_reset_phy(struct wm_softc *sc)
3772 {
3773 	uint32_t reg;
3774 
3775 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3776 		device_xname(sc->sc_dev), __func__));
3777 	if (wm_phy_resetisblocked(sc))
3778 		return;
3779 
3780 	sc->phy.acquire(sc);
3781 
3782 	reg = CSR_READ(sc, WMREG_CTRL);
3783 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
3784 	CSR_WRITE_FLUSH(sc);
3785 
3786 	delay(sc->phy.reset_delay_us);
3787 
3788 	CSR_WRITE(sc, WMREG_CTRL, reg);
3789 	CSR_WRITE_FLUSH(sc);
3790 
3791 	delay(150);
3792 
3793 	sc->phy.release(sc);
3794 
3795 	wm_get_cfg_done(sc);
3796 }
3797 
3798 static void
3799 wm_flush_desc_rings(struct wm_softc *sc)
3800 {
3801 	pcireg_t preg;
3802 	uint32_t reg;
3803 	int nexttx;
3804 
3805 	/* First, disable MULR fix in FEXTNVM11 */
3806 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
3807 	reg |= FEXTNVM11_DIS_MULRFIX;
3808 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
3809 
3810 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3811 	reg = CSR_READ(sc, WMREG_TDLEN(0));
3812 	if (((preg & DESCRING_STATUS_FLUSH_REQ) != 0) && (reg != 0)) {
3813 		struct wm_txqueue *txq;
3814 		wiseman_txdesc_t *txd;
3815 
3816 		/* TX */
3817 		printf("%s: Need TX flush (reg = %08x, len = %u)\n",
3818 		    device_xname(sc->sc_dev), preg, reg);
3819 		reg = CSR_READ(sc, WMREG_TCTL);
3820 		CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
3821 
3822 		txq = &sc->sc_queue[0].wmq_txq;
3823 		nexttx = txq->txq_next;
3824 		txd = &txq->txq_descs[nexttx];
3825 		wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
3826 		txd->wtx_cmdlen = htole32(WTX_CMD_IFCS| 512);
3827 		txd->wtx_fields.wtxu_status = 0;
3828 		txd->wtx_fields.wtxu_options = 0;
3829 		txd->wtx_fields.wtxu_vlan = 0;
3830 
3831 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3832 			BUS_SPACE_BARRIER_WRITE);
3833 
3834 		txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
3835 		CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
3836 		bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
3837 			BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
3838 		delay(250);
3839 	}
3840 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
3841 	if (preg & DESCRING_STATUS_FLUSH_REQ) {
3842 		uint32_t rctl;
3843 
3844 		/* RX */
3845 		printf("%s: Need RX flush (reg = %08x)\n",
3846 		    device_xname(sc->sc_dev), preg);
3847 		rctl = CSR_READ(sc, WMREG_RCTL);
3848 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3849 		CSR_WRITE_FLUSH(sc);
3850 		delay(150);
3851 
3852 		reg = CSR_READ(sc, WMREG_RXDCTL(0));
3853 		/* zero the lower 14 bits (prefetch and host thresholds) */
3854 		reg &= 0xffffc000;
3855 		/*
3856 		 * update thresholds: prefetch threshold to 31, host threshold
3857 		 * to 1 and make sure the granularity is "descriptors" and not
3858 		 * "cache lines"
3859 		 */
3860 		reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
3861 		CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
3862 
3863 		/*
3864 		 * momentarily enable the RX ring for the changes to take
3865 		 * effect
3866 		 */
3867 		CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
3868 		CSR_WRITE_FLUSH(sc);
3869 		delay(150);
3870 		CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
3871 	}
3872 }
3873 
3874 /*
3875  * wm_reset:
3876  *
3877  *	Reset the i82542 chip.
3878  */
3879 static void
3880 wm_reset(struct wm_softc *sc)
3881 {
3882 	int phy_reset = 0;
3883 	int i, error = 0;
3884 	uint32_t reg;
3885 
3886 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3887 		device_xname(sc->sc_dev), __func__));
3888 	KASSERT(sc->sc_type != 0);
3889 
3890 	/*
3891 	 * Allocate on-chip memory according to the MTU size.
3892 	 * The Packet Buffer Allocation register must be written
3893 	 * before the chip is reset.
3894 	 */
3895 	switch (sc->sc_type) {
3896 	case WM_T_82547:
3897 	case WM_T_82547_2:
3898 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3899 		    PBA_22K : PBA_30K;
3900 		for (i = 0; i < sc->sc_nqueues; i++) {
3901 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
3902 			txq->txq_fifo_head = 0;
3903 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
3904 			txq->txq_fifo_size =
3905 				(PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
3906 			txq->txq_fifo_stall = 0;
3907 		}
3908 		break;
3909 	case WM_T_82571:
3910 	case WM_T_82572:
3911 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
3912 	case WM_T_80003:
3913 		sc->sc_pba = PBA_32K;
3914 		break;
3915 	case WM_T_82573:
3916 		sc->sc_pba = PBA_12K;
3917 		break;
3918 	case WM_T_82574:
3919 	case WM_T_82583:
3920 		sc->sc_pba = PBA_20K;
3921 		break;
3922 	case WM_T_82576:
3923 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
3924 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
3925 		break;
3926 	case WM_T_82580:
3927 	case WM_T_I350:
3928 	case WM_T_I354:
3929 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
3930 		break;
3931 	case WM_T_I210:
3932 	case WM_T_I211:
3933 		sc->sc_pba = PBA_34K;
3934 		break;
3935 	case WM_T_ICH8:
3936 		/* Workaround for a bit corruption issue in FIFO memory */
3937 		sc->sc_pba = PBA_8K;
3938 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
3939 		break;
3940 	case WM_T_ICH9:
3941 	case WM_T_ICH10:
3942 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
3943 		    PBA_14K : PBA_10K;
3944 		break;
3945 	case WM_T_PCH:
3946 	case WM_T_PCH2:
3947 	case WM_T_PCH_LPT:
3948 	case WM_T_PCH_SPT:
3949 		sc->sc_pba = PBA_26K;
3950 		break;
3951 	default:
3952 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
3953 		    PBA_40K : PBA_48K;
3954 		break;
3955 	}
3956 	/*
3957 	 * Only old or non-multiqueue devices have the PBA register
3958 	 * XXX Need special handling for 82575.
3959 	 */
3960 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
3961 	    || (sc->sc_type == WM_T_82575))
3962 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
3963 
3964 	/* Prevent the PCI-E bus from sticking */
3965 	if (sc->sc_flags & WM_F_PCIE) {
3966 		int timeout = 800;
3967 
3968 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
3969 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3970 
3971 		while (timeout--) {
3972 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
3973 			    == 0)
3974 				break;
3975 			delay(100);
3976 		}
3977 	}
3978 
3979 	/* Set the completion timeout for interface */
3980 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
3981 	    || (sc->sc_type == WM_T_82580)
3982 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
3983 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
3984 		wm_set_pcie_completion_timeout(sc);
3985 
3986 	/* Clear interrupt */
3987 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
3988 	if (sc->sc_nintrs > 1) {
3989 		if (sc->sc_type != WM_T_82574) {
3990 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
3991 			CSR_WRITE(sc, WMREG_EIAC, 0);
3992 		} else {
3993 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
3994 		}
3995 	}
3996 
3997 	/* Stop the transmit and receive processes. */
3998 	CSR_WRITE(sc, WMREG_RCTL, 0);
3999 	sc->sc_rctl &= ~RCTL_EN;
4000 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4001 	CSR_WRITE_FLUSH(sc);
4002 
4003 	/* XXX set_tbi_sbp_82543() */
4004 
4005 	delay(10*1000);
4006 
4007 	/* Must acquire the MDIO ownership before MAC reset */
4008 	switch (sc->sc_type) {
4009 	case WM_T_82573:
4010 	case WM_T_82574:
4011 	case WM_T_82583:
4012 		error = wm_get_hw_semaphore_82573(sc);
4013 		break;
4014 	default:
4015 		break;
4016 	}
4017 
4018 	/*
4019 	 * 82541 Errata 29? & 82547 Errata 28?
4020 	 * See also the description about PHY_RST bit in CTRL register
4021 	 * in 8254x_GBe_SDM.pdf.
4022 	 */
4023 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4024 		CSR_WRITE(sc, WMREG_CTRL,
4025 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4026 		CSR_WRITE_FLUSH(sc);
4027 		delay(5000);
4028 	}
4029 
4030 	switch (sc->sc_type) {
4031 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4032 	case WM_T_82541:
4033 	case WM_T_82541_2:
4034 	case WM_T_82547:
4035 	case WM_T_82547_2:
4036 		/*
4037 		 * On some chipsets, a reset through a memory-mapped write
4038 		 * cycle can cause the chip to reset before completing the
4039 		 * write cycle.  This causes major headache that can be
4040 		 * avoided by issuing the reset via indirect register writes
4041 		 * through I/O space.
4042 		 *
4043 		 * So, if we successfully mapped the I/O BAR at attach time,
4044 		 * use that.  Otherwise, try our luck with a memory-mapped
4045 		 * reset.
4046 		 */
4047 		if (sc->sc_flags & WM_F_IOH_VALID)
4048 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4049 		else
4050 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4051 		break;
4052 	case WM_T_82545_3:
4053 	case WM_T_82546_3:
4054 		/* Use the shadow control register on these chips. */
4055 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4056 		break;
4057 	case WM_T_80003:
4058 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4059 		sc->phy.acquire(sc);
4060 		CSR_WRITE(sc, WMREG_CTRL, reg);
4061 		sc->phy.release(sc);
4062 		break;
4063 	case WM_T_ICH8:
4064 	case WM_T_ICH9:
4065 	case WM_T_ICH10:
4066 	case WM_T_PCH:
4067 	case WM_T_PCH2:
4068 	case WM_T_PCH_LPT:
4069 	case WM_T_PCH_SPT:
4070 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4071 		if (wm_phy_resetisblocked(sc) == false) {
4072 			/*
4073 			 * Gate automatic PHY configuration by hardware on
4074 			 * non-managed 82579
4075 			 */
4076 			if ((sc->sc_type == WM_T_PCH2)
4077 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4078 				== 0))
4079 				wm_gate_hw_phy_config_ich8lan(sc, true);
4080 
4081 			reg |= CTRL_PHY_RESET;
4082 			phy_reset = 1;
4083 		} else
4084 			printf("XXX reset is blocked!!!\n");
4085 		sc->phy.acquire(sc);
4086 		CSR_WRITE(sc, WMREG_CTRL, reg);
4087 		/* Don't insert a completion barrier when reset */
4088 		delay(20*1000);
4089 		mutex_exit(sc->sc_ich_phymtx);
4090 		break;
4091 	case WM_T_82580:
4092 	case WM_T_I350:
4093 	case WM_T_I354:
4094 	case WM_T_I210:
4095 	case WM_T_I211:
4096 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4097 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4098 			CSR_WRITE_FLUSH(sc);
4099 		delay(5000);
4100 		break;
4101 	case WM_T_82542_2_0:
4102 	case WM_T_82542_2_1:
4103 	case WM_T_82543:
4104 	case WM_T_82540:
4105 	case WM_T_82545:
4106 	case WM_T_82546:
4107 	case WM_T_82571:
4108 	case WM_T_82572:
4109 	case WM_T_82573:
4110 	case WM_T_82574:
4111 	case WM_T_82575:
4112 	case WM_T_82576:
4113 	case WM_T_82583:
4114 	default:
4115 		/* Everything else can safely use the documented method. */
4116 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4117 		break;
4118 	}
4119 
4120 	/* Must release the MDIO ownership after MAC reset */
4121 	switch (sc->sc_type) {
4122 	case WM_T_82573:
4123 	case WM_T_82574:
4124 	case WM_T_82583:
4125 		if (error == 0)
4126 			wm_put_hw_semaphore_82573(sc);
4127 		break;
4128 	default:
4129 		break;
4130 	}
4131 
4132 	if (phy_reset != 0)
4133 		wm_get_cfg_done(sc);
4134 
4135 	/* reload EEPROM */
4136 	switch (sc->sc_type) {
4137 	case WM_T_82542_2_0:
4138 	case WM_T_82542_2_1:
4139 	case WM_T_82543:
4140 	case WM_T_82544:
4141 		delay(10);
4142 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4143 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4144 		CSR_WRITE_FLUSH(sc);
4145 		delay(2000);
4146 		break;
4147 	case WM_T_82540:
4148 	case WM_T_82545:
4149 	case WM_T_82545_3:
4150 	case WM_T_82546:
4151 	case WM_T_82546_3:
4152 		delay(5*1000);
4153 		/* XXX Disable HW ARPs on ASF enabled adapters */
4154 		break;
4155 	case WM_T_82541:
4156 	case WM_T_82541_2:
4157 	case WM_T_82547:
4158 	case WM_T_82547_2:
4159 		delay(20000);
4160 		/* XXX Disable HW ARPs on ASF enabled adapters */
4161 		break;
4162 	case WM_T_82571:
4163 	case WM_T_82572:
4164 	case WM_T_82573:
4165 	case WM_T_82574:
4166 	case WM_T_82583:
4167 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4168 			delay(10);
4169 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4170 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4171 			CSR_WRITE_FLUSH(sc);
4172 		}
4173 		/* check EECD_EE_AUTORD */
4174 		wm_get_auto_rd_done(sc);
4175 		/*
4176 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4177 		 * is set.
4178 		 */
4179 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4180 		    || (sc->sc_type == WM_T_82583))
4181 			delay(25*1000);
4182 		break;
4183 	case WM_T_82575:
4184 	case WM_T_82576:
4185 	case WM_T_82580:
4186 	case WM_T_I350:
4187 	case WM_T_I354:
4188 	case WM_T_I210:
4189 	case WM_T_I211:
4190 	case WM_T_80003:
4191 		/* check EECD_EE_AUTORD */
4192 		wm_get_auto_rd_done(sc);
4193 		break;
4194 	case WM_T_ICH8:
4195 	case WM_T_ICH9:
4196 	case WM_T_ICH10:
4197 	case WM_T_PCH:
4198 	case WM_T_PCH2:
4199 	case WM_T_PCH_LPT:
4200 	case WM_T_PCH_SPT:
4201 		break;
4202 	default:
4203 		panic("%s: unknown type\n", __func__);
4204 	}
4205 
4206 	/* Check whether EEPROM is present or not */
4207 	switch (sc->sc_type) {
4208 	case WM_T_82575:
4209 	case WM_T_82576:
4210 	case WM_T_82580:
4211 	case WM_T_I350:
4212 	case WM_T_I354:
4213 	case WM_T_ICH8:
4214 	case WM_T_ICH9:
4215 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4216 			/* Not found */
4217 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4218 			if (sc->sc_type == WM_T_82575)
4219 				wm_reset_init_script_82575(sc);
4220 		}
4221 		break;
4222 	default:
4223 		break;
4224 	}
4225 
4226 	if ((sc->sc_type == WM_T_82580)
4227 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4228 		/* clear global device reset status bit */
4229 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4230 	}
4231 
4232 	/* Clear any pending interrupt events. */
4233 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4234 	reg = CSR_READ(sc, WMREG_ICR);
4235 	if (sc->sc_nintrs > 1) {
4236 		if (sc->sc_type != WM_T_82574) {
4237 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4238 			CSR_WRITE(sc, WMREG_EIAC, 0);
4239 		} else
4240 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4241 	}
4242 
4243 	/* reload sc_ctrl */
4244 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4245 
4246 	if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4247 		wm_set_eee_i350(sc);
4248 
4249 	/* Clear the host wakeup bit after lcd reset */
4250 	if (sc->sc_type >= WM_T_PCH) {
4251 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
4252 		    BM_PORT_GEN_CFG);
4253 		reg &= ~BM_WUC_HOST_WU_BIT;
4254 		wm_gmii_hv_writereg(sc->sc_dev, 2,
4255 		    BM_PORT_GEN_CFG, reg);
4256 	}
4257 
4258 	/*
4259 	 * For PCH, this write will make sure that any noise will be detected
4260 	 * as a CRC error and be dropped rather than show up as a bad packet
4261 	 * to the DMA engine
4262 	 */
4263 	if (sc->sc_type == WM_T_PCH)
4264 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4265 
4266 	if (sc->sc_type >= WM_T_82544)
4267 		CSR_WRITE(sc, WMREG_WUC, 0);
4268 
4269 	wm_reset_mdicnfg_82580(sc);
4270 
4271 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4272 		wm_pll_workaround_i210(sc);
4273 }
4274 
4275 /*
4276  * wm_add_rxbuf:
4277  *
4278  *	Add a receive buffer to the indiciated descriptor.
4279  */
4280 static int
4281 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4282 {
4283 	struct wm_softc *sc = rxq->rxq_sc;
4284 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4285 	struct mbuf *m;
4286 	int error;
4287 
4288 	KASSERT(mutex_owned(rxq->rxq_lock));
4289 
4290 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4291 	if (m == NULL)
4292 		return ENOBUFS;
4293 
4294 	MCLGET(m, M_DONTWAIT);
4295 	if ((m->m_flags & M_EXT) == 0) {
4296 		m_freem(m);
4297 		return ENOBUFS;
4298 	}
4299 
4300 	if (rxs->rxs_mbuf != NULL)
4301 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4302 
4303 	rxs->rxs_mbuf = m;
4304 
4305 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4306 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4307 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4308 	if (error) {
4309 		/* XXX XXX XXX */
4310 		aprint_error_dev(sc->sc_dev,
4311 		    "unable to load rx DMA map %d, error = %d\n",
4312 		    idx, error);
4313 		panic("wm_add_rxbuf");
4314 	}
4315 
4316 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4317 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4318 
4319 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4320 		if ((sc->sc_rctl & RCTL_EN) != 0)
4321 			wm_init_rxdesc(rxq, idx);
4322 	} else
4323 		wm_init_rxdesc(rxq, idx);
4324 
4325 	return 0;
4326 }
4327 
4328 /*
4329  * wm_rxdrain:
4330  *
4331  *	Drain the receive queue.
4332  */
4333 static void
4334 wm_rxdrain(struct wm_rxqueue *rxq)
4335 {
4336 	struct wm_softc *sc = rxq->rxq_sc;
4337 	struct wm_rxsoft *rxs;
4338 	int i;
4339 
4340 	KASSERT(mutex_owned(rxq->rxq_lock));
4341 
4342 	for (i = 0; i < WM_NRXDESC; i++) {
4343 		rxs = &rxq->rxq_soft[i];
4344 		if (rxs->rxs_mbuf != NULL) {
4345 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4346 			m_freem(rxs->rxs_mbuf);
4347 			rxs->rxs_mbuf = NULL;
4348 		}
4349 	}
4350 }
4351 
4352 
4353 /*
4354  * XXX copy from FreeBSD's sys/net/rss_config.c
4355  */
4356 /*
4357  * RSS secret key, intended to prevent attacks on load-balancing.  Its
4358  * effectiveness may be limited by algorithm choice and available entropy
4359  * during the boot.
4360  *
4361  * XXXRW: And that we don't randomize it yet!
4362  *
4363  * This is the default Microsoft RSS specification key which is also
4364  * the Chelsio T5 firmware default key.
4365  */
4366 #define RSS_KEYSIZE 40
4367 static uint8_t wm_rss_key[RSS_KEYSIZE] = {
4368 	0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
4369 	0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
4370 	0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
4371 	0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
4372 	0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
4373 };
4374 
4375 /*
4376  * Caller must pass an array of size sizeof(rss_key).
4377  *
4378  * XXX
4379  * As if_ixgbe may use this function, this function should not be
4380  * if_wm specific function.
4381  */
4382 static void
4383 wm_rss_getkey(uint8_t *key)
4384 {
4385 
4386 	memcpy(key, wm_rss_key, sizeof(wm_rss_key));
4387 }
4388 
4389 /*
4390  * Setup registers for RSS.
4391  *
4392  * XXX not yet VMDq support
4393  */
4394 static void
4395 wm_init_rss(struct wm_softc *sc)
4396 {
4397 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4398 	int i;
4399 
4400 	CTASSERT(sizeof(rss_key) == sizeof(wm_rss_key));
4401 
4402 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4403 		int qid, reta_ent;
4404 
4405 		qid  = i % sc->sc_nqueues;
4406 		switch(sc->sc_type) {
4407 		case WM_T_82574:
4408 			reta_ent = __SHIFTIN(qid,
4409 			    RETA_ENT_QINDEX_MASK_82574);
4410 			break;
4411 		case WM_T_82575:
4412 			reta_ent = __SHIFTIN(qid,
4413 			    RETA_ENT_QINDEX1_MASK_82575);
4414 			break;
4415 		default:
4416 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4417 			break;
4418 		}
4419 
4420 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4421 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4422 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4423 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4424 	}
4425 
4426 	wm_rss_getkey((uint8_t *)rss_key);
4427 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4428 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4429 
4430 	if (sc->sc_type == WM_T_82574)
4431 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4432 	else
4433 		mrqc = MRQC_ENABLE_RSS_MQ;
4434 
4435 	/*
4436 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4437 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4438 	 */
4439 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4440 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4441 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4442 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4443 
4444 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4445 }
4446 
4447 /*
4448  * Adjust TX and RX queue numbers which the system actulally uses.
4449  *
4450  * The numbers are affected by below parameters.
4451  *     - The nubmer of hardware queues
4452  *     - The number of MSI-X vectors (= "nvectors" argument)
4453  *     - ncpu
4454  */
4455 static void
4456 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4457 {
4458 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4459 
4460 	if (nvectors < 2) {
4461 		sc->sc_nqueues = 1;
4462 		return;
4463 	}
4464 
4465 	switch(sc->sc_type) {
4466 	case WM_T_82572:
4467 		hw_ntxqueues = 2;
4468 		hw_nrxqueues = 2;
4469 		break;
4470 	case WM_T_82574:
4471 		hw_ntxqueues = 2;
4472 		hw_nrxqueues = 2;
4473 		break;
4474 	case WM_T_82575:
4475 		hw_ntxqueues = 4;
4476 		hw_nrxqueues = 4;
4477 		break;
4478 	case WM_T_82576:
4479 		hw_ntxqueues = 16;
4480 		hw_nrxqueues = 16;
4481 		break;
4482 	case WM_T_82580:
4483 	case WM_T_I350:
4484 	case WM_T_I354:
4485 		hw_ntxqueues = 8;
4486 		hw_nrxqueues = 8;
4487 		break;
4488 	case WM_T_I210:
4489 		hw_ntxqueues = 4;
4490 		hw_nrxqueues = 4;
4491 		break;
4492 	case WM_T_I211:
4493 		hw_ntxqueues = 2;
4494 		hw_nrxqueues = 2;
4495 		break;
4496 		/*
4497 		 * As below ethernet controllers does not support MSI-X,
4498 		 * this driver let them not use multiqueue.
4499 		 *     - WM_T_80003
4500 		 *     - WM_T_ICH8
4501 		 *     - WM_T_ICH9
4502 		 *     - WM_T_ICH10
4503 		 *     - WM_T_PCH
4504 		 *     - WM_T_PCH2
4505 		 *     - WM_T_PCH_LPT
4506 		 */
4507 	default:
4508 		hw_ntxqueues = 1;
4509 		hw_nrxqueues = 1;
4510 		break;
4511 	}
4512 
4513 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
4514 
4515 	/*
4516 	 * As queues more than MSI-X vectors cannot improve scaling, we limit
4517 	 * the number of queues used actually.
4518 	 */
4519 	if (nvectors < hw_nqueues + 1) {
4520 		sc->sc_nqueues = nvectors - 1;
4521 	} else {
4522 		sc->sc_nqueues = hw_nqueues;
4523 	}
4524 
4525 	/*
4526 	 * As queues more then cpus cannot improve scaling, we limit
4527 	 * the number of queues used actually.
4528 	 */
4529 	if (ncpu < sc->sc_nqueues)
4530 		sc->sc_nqueues = ncpu;
4531 }
4532 
4533 /*
4534  * Both single interrupt MSI and INTx can use this function.
4535  */
4536 static int
4537 wm_setup_legacy(struct wm_softc *sc)
4538 {
4539 	pci_chipset_tag_t pc = sc->sc_pc;
4540 	const char *intrstr = NULL;
4541 	char intrbuf[PCI_INTRSTR_LEN];
4542 	int error;
4543 
4544 	error = wm_alloc_txrx_queues(sc);
4545 	if (error) {
4546 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4547 		    error);
4548 		return ENOMEM;
4549 	}
4550 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
4551 	    sizeof(intrbuf));
4552 #ifdef WM_MPSAFE
4553 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
4554 #endif
4555 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
4556 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
4557 	if (sc->sc_ihs[0] == NULL) {
4558 		aprint_error_dev(sc->sc_dev,"unable to establish %s\n",
4559 		    (pci_intr_type(pc, sc->sc_intrs[0])
4560 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
4561 		return ENOMEM;
4562 	}
4563 
4564 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
4565 	sc->sc_nintrs = 1;
4566 	return 0;
4567 }
4568 
4569 static int
4570 wm_setup_msix(struct wm_softc *sc)
4571 {
4572 	void *vih;
4573 	kcpuset_t *affinity;
4574 	int qidx, error, intr_idx, txrx_established;
4575 	pci_chipset_tag_t pc = sc->sc_pc;
4576 	const char *intrstr = NULL;
4577 	char intrbuf[PCI_INTRSTR_LEN];
4578 	char intr_xname[INTRDEVNAMEBUF];
4579 
4580 	if (sc->sc_nqueues < ncpu) {
4581 		/*
4582 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
4583 		 * interrupts start from CPU#1.
4584 		 */
4585 		sc->sc_affinity_offset = 1;
4586 	} else {
4587 		/*
4588 		 * In this case, this device use all CPUs. So, we unify
4589 		 * affinitied cpu_index to msix vector number for readability.
4590 		 */
4591 		sc->sc_affinity_offset = 0;
4592 	}
4593 
4594 	error = wm_alloc_txrx_queues(sc);
4595 	if (error) {
4596 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
4597 		    error);
4598 		return ENOMEM;
4599 	}
4600 
4601 	kcpuset_create(&affinity, false);
4602 	intr_idx = 0;
4603 
4604 	/*
4605 	 * TX and RX
4606 	 */
4607 	txrx_established = 0;
4608 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
4609 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4610 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
4611 
4612 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4613 		    sizeof(intrbuf));
4614 #ifdef WM_MPSAFE
4615 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
4616 		    PCI_INTR_MPSAFE, true);
4617 #endif
4618 		memset(intr_xname, 0, sizeof(intr_xname));
4619 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
4620 		    device_xname(sc->sc_dev), qidx);
4621 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4622 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
4623 		if (vih == NULL) {
4624 			aprint_error_dev(sc->sc_dev,
4625 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
4626 			    intrstr ? " at " : "",
4627 			    intrstr ? intrstr : "");
4628 
4629 			goto fail;
4630 		}
4631 		kcpuset_zero(affinity);
4632 		/* Round-robin affinity */
4633 		kcpuset_set(affinity, affinity_to);
4634 		error = interrupt_distribute(vih, affinity, NULL);
4635 		if (error == 0) {
4636 			aprint_normal_dev(sc->sc_dev,
4637 			    "for TX and RX interrupting at %s affinity to %u\n",
4638 			    intrstr, affinity_to);
4639 		} else {
4640 			aprint_normal_dev(sc->sc_dev,
4641 			    "for TX and RX interrupting at %s\n", intrstr);
4642 		}
4643 		sc->sc_ihs[intr_idx] = vih;
4644 		wmq->wmq_id= qidx;
4645 		wmq->wmq_intr_idx = intr_idx;
4646 
4647 		txrx_established++;
4648 		intr_idx++;
4649 	}
4650 
4651 	/*
4652 	 * LINK
4653 	 */
4654 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
4655 	    sizeof(intrbuf));
4656 #ifdef WM_MPSAFE
4657 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
4658 #endif
4659 	memset(intr_xname, 0, sizeof(intr_xname));
4660 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
4661 	    device_xname(sc->sc_dev));
4662 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
4663 		    IPL_NET, wm_linkintr_msix, sc, intr_xname);
4664 	if (vih == NULL) {
4665 		aprint_error_dev(sc->sc_dev,
4666 		    "unable to establish MSI-X(for LINK)%s%s\n",
4667 		    intrstr ? " at " : "",
4668 		    intrstr ? intrstr : "");
4669 
4670 		goto fail;
4671 	}
4672 	/* keep default affinity to LINK interrupt */
4673 	aprint_normal_dev(sc->sc_dev,
4674 	    "for LINK interrupting at %s\n", intrstr);
4675 	sc->sc_ihs[intr_idx] = vih;
4676 	sc->sc_link_intr_idx = intr_idx;
4677 
4678 	sc->sc_nintrs = sc->sc_nqueues + 1;
4679 	kcpuset_destroy(affinity);
4680 	return 0;
4681 
4682  fail:
4683 	for (qidx = 0; qidx < txrx_established; qidx++) {
4684 		struct wm_queue *wmq = &sc->sc_queue[qidx];
4685 		pci_intr_disestablish(sc->sc_pc,sc->sc_ihs[wmq->wmq_intr_idx]);
4686 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
4687 	}
4688 
4689 	kcpuset_destroy(affinity);
4690 	return ENOMEM;
4691 }
4692 
4693 static void
4694 wm_turnon(struct wm_softc *sc)
4695 {
4696 	int i;
4697 
4698 	KASSERT(WM_CORE_LOCKED(sc));
4699 
4700 	for(i = 0; i < sc->sc_nqueues; i++) {
4701 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4702 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4703 
4704 		mutex_enter(txq->txq_lock);
4705 		txq->txq_stopping = false;
4706 		mutex_exit(txq->txq_lock);
4707 
4708 		mutex_enter(rxq->rxq_lock);
4709 		rxq->rxq_stopping = false;
4710 		mutex_exit(rxq->rxq_lock);
4711 	}
4712 
4713 	sc->sc_core_stopping = false;
4714 }
4715 
4716 static void
4717 wm_turnoff(struct wm_softc *sc)
4718 {
4719 	int i;
4720 
4721 	KASSERT(WM_CORE_LOCKED(sc));
4722 
4723 	sc->sc_core_stopping = true;
4724 
4725 	for(i = 0; i < sc->sc_nqueues; i++) {
4726 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
4727 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4728 
4729 		mutex_enter(rxq->rxq_lock);
4730 		rxq->rxq_stopping = true;
4731 		mutex_exit(rxq->rxq_lock);
4732 
4733 		mutex_enter(txq->txq_lock);
4734 		txq->txq_stopping = true;
4735 		mutex_exit(txq->txq_lock);
4736 	}
4737 }
4738 
4739 /*
4740  * wm_init:		[ifnet interface function]
4741  *
4742  *	Initialize the interface.
4743  */
4744 static int
4745 wm_init(struct ifnet *ifp)
4746 {
4747 	struct wm_softc *sc = ifp->if_softc;
4748 	int ret;
4749 
4750 	WM_CORE_LOCK(sc);
4751 	ret = wm_init_locked(ifp);
4752 	WM_CORE_UNLOCK(sc);
4753 
4754 	return ret;
4755 }
4756 
4757 static int
4758 wm_init_locked(struct ifnet *ifp)
4759 {
4760 	struct wm_softc *sc = ifp->if_softc;
4761 	int i, j, trynum, error = 0;
4762 	uint32_t reg;
4763 
4764 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4765 		device_xname(sc->sc_dev), __func__));
4766 	KASSERT(WM_CORE_LOCKED(sc));
4767 
4768 	/*
4769 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGMENT is set.
4770 	 * There is a small but measurable benefit to avoiding the adjusment
4771 	 * of the descriptor so that the headers are aligned, for normal mtu,
4772 	 * on such platforms.  One possibility is that the DMA itself is
4773 	 * slightly more efficient if the front of the entire packet (instead
4774 	 * of the front of the headers) is aligned.
4775 	 *
4776 	 * Note we must always set align_tweak to 0 if we are using
4777 	 * jumbo frames.
4778 	 */
4779 #ifdef __NO_STRICT_ALIGNMENT
4780 	sc->sc_align_tweak = 0;
4781 #else
4782 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
4783 		sc->sc_align_tweak = 0;
4784 	else
4785 		sc->sc_align_tweak = 2;
4786 #endif /* __NO_STRICT_ALIGNMENT */
4787 
4788 	/* Cancel any pending I/O. */
4789 	wm_stop_locked(ifp, 0);
4790 
4791 	/* update statistics before reset */
4792 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
4793 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
4794 
4795 	/* PCH_SPT hardware workaround */
4796 	if (sc->sc_type == WM_T_PCH_SPT)
4797 		wm_flush_desc_rings(sc);
4798 
4799 	/* Reset the chip to a known state. */
4800 	wm_reset(sc);
4801 
4802 	/* AMT based hardware can now take control from firmware */
4803 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
4804 		wm_get_hw_control(sc);
4805 
4806 	/* Init hardware bits */
4807 	wm_initialize_hardware_bits(sc);
4808 
4809 	/* Reset the PHY. */
4810 	if (sc->sc_flags & WM_F_HAS_MII)
4811 		wm_gmii_reset(sc);
4812 
4813 	/* Calculate (E)ITR value */
4814 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4815 		sc->sc_itr = 450;	/* For EITR */
4816 	} else if (sc->sc_type >= WM_T_82543) {
4817 		/*
4818 		 * Set up the interrupt throttling register (units of 256ns)
4819 		 * Note that a footnote in Intel's documentation says this
4820 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
4821 		 * or 10Mbit mode.  Empirically, it appears to be the case
4822 		 * that that is also true for the 1024ns units of the other
4823 		 * interrupt-related timer registers -- so, really, we ought
4824 		 * to divide this value by 4 when the link speed is low.
4825 		 *
4826 		 * XXX implement this division at link speed change!
4827 		 */
4828 
4829 		/*
4830 		 * For N interrupts/sec, set this value to:
4831 		 * 1000000000 / (N * 256).  Note that we set the
4832 		 * absolute and packet timer values to this value
4833 		 * divided by 4 to get "simple timer" behavior.
4834 		 */
4835 
4836 		sc->sc_itr = 1500;		/* 2604 ints/sec */
4837 	}
4838 
4839 	error = wm_init_txrx_queues(sc);
4840 	if (error)
4841 		goto out;
4842 
4843 	/*
4844 	 * Clear out the VLAN table -- we don't use it (yet).
4845 	 */
4846 	CSR_WRITE(sc, WMREG_VET, 0);
4847 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
4848 		trynum = 10; /* Due to hw errata */
4849 	else
4850 		trynum = 1;
4851 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
4852 		for (j = 0; j < trynum; j++)
4853 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
4854 
4855 	/*
4856 	 * Set up flow-control parameters.
4857 	 *
4858 	 * XXX Values could probably stand some tuning.
4859 	 */
4860 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
4861 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
4862 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
4863 	    && (sc->sc_type != WM_T_PCH_SPT)) {
4864 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
4865 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
4866 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
4867 	}
4868 
4869 	sc->sc_fcrtl = FCRTL_DFLT;
4870 	if (sc->sc_type < WM_T_82543) {
4871 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
4872 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
4873 	} else {
4874 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
4875 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
4876 	}
4877 
4878 	if (sc->sc_type == WM_T_80003)
4879 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
4880 	else
4881 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
4882 
4883 	/* Writes the control register. */
4884 	wm_set_vlan(sc);
4885 
4886 	if (sc->sc_flags & WM_F_HAS_MII) {
4887 		int val;
4888 
4889 		switch (sc->sc_type) {
4890 		case WM_T_80003:
4891 		case WM_T_ICH8:
4892 		case WM_T_ICH9:
4893 		case WM_T_ICH10:
4894 		case WM_T_PCH:
4895 		case WM_T_PCH2:
4896 		case WM_T_PCH_LPT:
4897 		case WM_T_PCH_SPT:
4898 			/*
4899 			 * Set the mac to wait the maximum time between each
4900 			 * iteration and increase the max iterations when
4901 			 * polling the phy; this fixes erroneous timeouts at
4902 			 * 10Mbps.
4903 			 */
4904 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
4905 			    0xFFFF);
4906 			val = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM);
4907 			val |= 0x3F;
4908 			wm_kmrn_writereg(sc,
4909 			    KUMCTRLSTA_OFFSET_INB_PARAM, val);
4910 			break;
4911 		default:
4912 			break;
4913 		}
4914 
4915 		if (sc->sc_type == WM_T_80003) {
4916 			val = CSR_READ(sc, WMREG_CTRL_EXT);
4917 			val &= ~CTRL_EXT_LINK_MODE_MASK;
4918 			CSR_WRITE(sc, WMREG_CTRL_EXT, val);
4919 
4920 			/* Bypass RX and TX FIFO's */
4921 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
4922 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
4923 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
4924 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
4925 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
4926 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
4927 		}
4928 	}
4929 #if 0
4930 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
4931 #endif
4932 
4933 	/* Set up checksum offload parameters. */
4934 	reg = CSR_READ(sc, WMREG_RXCSUM);
4935 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
4936 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
4937 		reg |= RXCSUM_IPOFL;
4938 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
4939 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
4940 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
4941 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
4942 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
4943 
4944 	/* Set up MSI-X */
4945 	if (sc->sc_nintrs > 1) {
4946 		uint32_t ivar;
4947 		struct wm_queue *wmq;
4948 		int qid, qintr_idx;
4949 
4950 		if (sc->sc_type == WM_T_82575) {
4951 			/* Interrupt control */
4952 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4953 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
4954 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4955 
4956 			/* TX and RX */
4957 			for (i = 0; i < sc->sc_nqueues; i++) {
4958 				wmq = &sc->sc_queue[i];
4959 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
4960 				    EITR_TX_QUEUE(wmq->wmq_id)
4961 				    | EITR_RX_QUEUE(wmq->wmq_id));
4962 			}
4963 			/* Link status */
4964 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
4965 			    EITR_OTHER);
4966 		} else if (sc->sc_type == WM_T_82574) {
4967 			/* Interrupt control */
4968 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4969 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
4970 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4971 
4972 			ivar = 0;
4973 			/* TX and RX */
4974 			for (i = 0; i < sc->sc_nqueues; i++) {
4975 				wmq = &sc->sc_queue[i];
4976 				qid = wmq->wmq_id;
4977 				qintr_idx = wmq->wmq_intr_idx;
4978 
4979 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4980 				    IVAR_TX_MASK_Q_82574(qid));
4981 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
4982 				    IVAR_RX_MASK_Q_82574(qid));
4983 			}
4984 			/* Link status */
4985 			ivar |= __SHIFTIN((IVAR_VALID_82574
4986 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
4987 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
4988 		} else {
4989 			/* Interrupt control */
4990 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
4991 			    | GPIE_EIAME | GPIE_PBA);
4992 
4993 			switch (sc->sc_type) {
4994 			case WM_T_82580:
4995 			case WM_T_I350:
4996 			case WM_T_I354:
4997 			case WM_T_I210:
4998 			case WM_T_I211:
4999 				/* TX and RX */
5000 				for (i = 0; i < sc->sc_nqueues; i++) {
5001 					wmq = &sc->sc_queue[i];
5002 					qid = wmq->wmq_id;
5003 					qintr_idx = wmq->wmq_intr_idx;
5004 
5005 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5006 					ivar &= ~IVAR_TX_MASK_Q(qid);
5007 					ivar |= __SHIFTIN((qintr_idx
5008 						| IVAR_VALID),
5009 					    IVAR_TX_MASK_Q(qid));
5010 					ivar &= ~IVAR_RX_MASK_Q(qid);
5011 					ivar |= __SHIFTIN((qintr_idx
5012 						| IVAR_VALID),
5013 					    IVAR_RX_MASK_Q(qid));
5014 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5015 				}
5016 				break;
5017 			case WM_T_82576:
5018 				/* TX and RX */
5019 				for (i = 0; i < sc->sc_nqueues; i++) {
5020 					wmq = &sc->sc_queue[i];
5021 					qid = wmq->wmq_id;
5022 					qintr_idx = wmq->wmq_intr_idx;
5023 
5024 					ivar = CSR_READ(sc,
5025 					    WMREG_IVAR_Q_82576(qid));
5026 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5027 					ivar |= __SHIFTIN((qintr_idx
5028 						| IVAR_VALID),
5029 					    IVAR_TX_MASK_Q_82576(qid));
5030 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5031 					ivar |= __SHIFTIN((qintr_idx
5032 						| IVAR_VALID),
5033 					    IVAR_RX_MASK_Q_82576(qid));
5034 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5035 					    ivar);
5036 				}
5037 				break;
5038 			default:
5039 				break;
5040 			}
5041 
5042 			/* Link status */
5043 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5044 			    IVAR_MISC_OTHER);
5045 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5046 		}
5047 
5048 		if (sc->sc_nqueues > 1) {
5049 			wm_init_rss(sc);
5050 
5051 			/*
5052 			** NOTE: Receive Full-Packet Checksum Offload
5053 			** is mutually exclusive with Multiqueue. However
5054 			** this is not the same as TCP/IP checksums which
5055 			** still work.
5056 			*/
5057 			reg = CSR_READ(sc, WMREG_RXCSUM);
5058 			reg |= RXCSUM_PCSD;
5059 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
5060 		}
5061 	}
5062 
5063 	/* Set up the interrupt registers. */
5064 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5065 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5066 	    ICR_RXO | ICR_RXT0;
5067 	if (sc->sc_nintrs > 1) {
5068 		uint32_t mask;
5069 		struct wm_queue *wmq;
5070 
5071 		switch (sc->sc_type) {
5072 		case WM_T_82574:
5073 			CSR_WRITE(sc, WMREG_EIAC_82574,
5074 			    WMREG_EIAC_82574_MSIX_MASK);
5075 			sc->sc_icr |= WMREG_EIAC_82574_MSIX_MASK;
5076 			CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5077 			break;
5078 		default:
5079 			if (sc->sc_type == WM_T_82575) {
5080 				mask = 0;
5081 				for (i = 0; i < sc->sc_nqueues; i++) {
5082 					wmq = &sc->sc_queue[i];
5083 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
5084 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
5085 				}
5086 				mask |= EITR_OTHER;
5087 			} else {
5088 				mask = 0;
5089 				for (i = 0; i < sc->sc_nqueues; i++) {
5090 					wmq = &sc->sc_queue[i];
5091 					mask |= 1 << wmq->wmq_intr_idx;
5092 				}
5093 				mask |= 1 << sc->sc_link_intr_idx;
5094 			}
5095 			CSR_WRITE(sc, WMREG_EIAC, mask);
5096 			CSR_WRITE(sc, WMREG_EIAM, mask);
5097 			CSR_WRITE(sc, WMREG_EIMS, mask);
5098 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5099 			break;
5100 		}
5101 	} else
5102 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5103 
5104 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
5105 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
5106 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
5107 	    || (sc->sc_type == WM_T_PCH_SPT)) {
5108 		reg = CSR_READ(sc, WMREG_KABGTXD);
5109 		reg |= KABGTXD_BGSQLBIAS;
5110 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
5111 	}
5112 
5113 	/* Set up the inter-packet gap. */
5114 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5115 
5116 	if (sc->sc_type >= WM_T_82543) {
5117 		/*
5118 		 * XXX 82574 has both ITR and EITR. SET EITR when we use
5119 		 * the multi queue function with MSI-X.
5120 		 */
5121 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5122 			int qidx;
5123 			for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5124 				struct wm_queue *wmq = &sc->sc_queue[qidx];
5125 				CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx),
5126 				    sc->sc_itr);
5127 			}
5128 			/*
5129 			 * Link interrupts occur much less than TX
5130 			 * interrupts and RX interrupts. So, we don't
5131 			 * tune EINTR(WM_MSIX_LINKINTR_IDX) value like
5132 			 * FreeBSD's if_igb.
5133 			 */
5134 		} else
5135 			CSR_WRITE(sc, WMREG_ITR, sc->sc_itr);
5136 	}
5137 
5138 	/* Set the VLAN ethernetype. */
5139 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5140 
5141 	/*
5142 	 * Set up the transmit control register; we start out with
5143 	 * a collision distance suitable for FDX, but update it whe
5144 	 * we resolve the media type.
5145 	 */
5146 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5147 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
5148 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5149 	if (sc->sc_type >= WM_T_82571)
5150 		sc->sc_tctl |= TCTL_MULR;
5151 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5152 
5153 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5154 		/* Write TDT after TCTL.EN is set. See the document. */
5155 		CSR_WRITE(sc, WMREG_TDT(0), 0);
5156 	}
5157 
5158 	if (sc->sc_type == WM_T_80003) {
5159 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
5160 		reg &= ~TCTL_EXT_GCEX_MASK;
5161 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5162 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5163 	}
5164 
5165 	/* Set the media. */
5166 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5167 		goto out;
5168 
5169 	/* Configure for OS presence */
5170 	wm_init_manageability(sc);
5171 
5172 	/*
5173 	 * Set up the receive control register; we actually program
5174 	 * the register when we set the receive filter.  Use multicast
5175 	 * address offset type 0.
5176 	 *
5177 	 * Only the i82544 has the ability to strip the incoming
5178 	 * CRC, so we don't enable that feature.
5179 	 */
5180 	sc->sc_mchash_type = 0;
5181 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5182 	    | RCTL_MO(sc->sc_mchash_type);
5183 
5184 	/*
5185 	 * The I350 has a bug where it always strips the CRC whether
5186 	 * asked to or not. So ask for stripped CRC here and cope in rxeof
5187 	 */
5188 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5189 	    || (sc->sc_type == WM_T_I210))
5190 		sc->sc_rctl |= RCTL_SECRC;
5191 
5192 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5193 	    && (ifp->if_mtu > ETHERMTU)) {
5194 		sc->sc_rctl |= RCTL_LPE;
5195 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5196 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5197 	}
5198 
5199 	if (MCLBYTES == 2048) {
5200 		sc->sc_rctl |= RCTL_2k;
5201 	} else {
5202 		if (sc->sc_type >= WM_T_82543) {
5203 			switch (MCLBYTES) {
5204 			case 4096:
5205 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5206 				break;
5207 			case 8192:
5208 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5209 				break;
5210 			case 16384:
5211 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5212 				break;
5213 			default:
5214 				panic("wm_init: MCLBYTES %d unsupported",
5215 				    MCLBYTES);
5216 				break;
5217 			}
5218 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
5219 	}
5220 
5221 	/* Set the receive filter. */
5222 	wm_set_filter(sc);
5223 
5224 	/* Enable ECC */
5225 	switch (sc->sc_type) {
5226 	case WM_T_82571:
5227 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5228 		reg |= PBA_ECC_CORR_EN;
5229 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5230 		break;
5231 	case WM_T_PCH_LPT:
5232 	case WM_T_PCH_SPT:
5233 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5234 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5235 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5236 
5237 		sc->sc_ctrl |= CTRL_MEHE;
5238 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5239 		break;
5240 	default:
5241 		break;
5242 	}
5243 
5244 	/* On 575 and later set RDT only if RX enabled */
5245 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5246 		int qidx;
5247 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5248 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5249 			for (i = 0; i < WM_NRXDESC; i++) {
5250 				mutex_enter(rxq->rxq_lock);
5251 				wm_init_rxdesc(rxq, i);
5252 				mutex_exit(rxq->rxq_lock);
5253 
5254 			}
5255 		}
5256 	}
5257 
5258 	wm_turnon(sc);
5259 
5260 	/* Start the one second link check clock. */
5261 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5262 
5263 	/* ...all done! */
5264 	ifp->if_flags |= IFF_RUNNING;
5265 	ifp->if_flags &= ~IFF_OACTIVE;
5266 
5267  out:
5268 	sc->sc_if_flags = ifp->if_flags;
5269 	if (error)
5270 		log(LOG_ERR, "%s: interface not running\n",
5271 		    device_xname(sc->sc_dev));
5272 	return error;
5273 }
5274 
5275 /*
5276  * wm_stop:		[ifnet interface function]
5277  *
5278  *	Stop transmission on the interface.
5279  */
5280 static void
5281 wm_stop(struct ifnet *ifp, int disable)
5282 {
5283 	struct wm_softc *sc = ifp->if_softc;
5284 
5285 	WM_CORE_LOCK(sc);
5286 	wm_stop_locked(ifp, disable);
5287 	WM_CORE_UNLOCK(sc);
5288 }
5289 
5290 static void
5291 wm_stop_locked(struct ifnet *ifp, int disable)
5292 {
5293 	struct wm_softc *sc = ifp->if_softc;
5294 	struct wm_txsoft *txs;
5295 	int i, qidx;
5296 
5297 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5298 		device_xname(sc->sc_dev), __func__));
5299 	KASSERT(WM_CORE_LOCKED(sc));
5300 
5301 	wm_turnoff(sc);
5302 
5303 	/* Stop the one second clock. */
5304 	callout_stop(&sc->sc_tick_ch);
5305 
5306 	/* Stop the 82547 Tx FIFO stall check timer. */
5307 	if (sc->sc_type == WM_T_82547)
5308 		callout_stop(&sc->sc_txfifo_ch);
5309 
5310 	if (sc->sc_flags & WM_F_HAS_MII) {
5311 		/* Down the MII. */
5312 		mii_down(&sc->sc_mii);
5313 	} else {
5314 #if 0
5315 		/* Should we clear PHY's status properly? */
5316 		wm_reset(sc);
5317 #endif
5318 	}
5319 
5320 	/* Stop the transmit and receive processes. */
5321 	CSR_WRITE(sc, WMREG_TCTL, 0);
5322 	CSR_WRITE(sc, WMREG_RCTL, 0);
5323 	sc->sc_rctl &= ~RCTL_EN;
5324 
5325 	/*
5326 	 * Clear the interrupt mask to ensure the device cannot assert its
5327 	 * interrupt line.
5328 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5329 	 * service any currently pending or shared interrupt.
5330 	 */
5331 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5332 	sc->sc_icr = 0;
5333 	if (sc->sc_nintrs > 1) {
5334 		if (sc->sc_type != WM_T_82574) {
5335 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
5336 			CSR_WRITE(sc, WMREG_EIAC, 0);
5337 		} else
5338 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
5339 	}
5340 
5341 	/* Release any queued transmit buffers. */
5342 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5343 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5344 		struct wm_txqueue *txq = &wmq->wmq_txq;
5345 		mutex_enter(txq->txq_lock);
5346 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5347 			txs = &txq->txq_soft[i];
5348 			if (txs->txs_mbuf != NULL) {
5349 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
5350 				m_freem(txs->txs_mbuf);
5351 				txs->txs_mbuf = NULL;
5352 			}
5353 		}
5354 		mutex_exit(txq->txq_lock);
5355 	}
5356 
5357 	/* Mark the interface as down and cancel the watchdog timer. */
5358 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5359 	ifp->if_timer = 0;
5360 
5361 	if (disable) {
5362 		for (i = 0; i < sc->sc_nqueues; i++) {
5363 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5364 			mutex_enter(rxq->rxq_lock);
5365 			wm_rxdrain(rxq);
5366 			mutex_exit(rxq->rxq_lock);
5367 		}
5368 	}
5369 
5370 #if 0 /* notyet */
5371 	if (sc->sc_type >= WM_T_82544)
5372 		CSR_WRITE(sc, WMREG_WUC, 0);
5373 #endif
5374 }
5375 
5376 static void
5377 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
5378 {
5379 	struct mbuf *m;
5380 	int i;
5381 
5382 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
5383 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
5384 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
5385 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
5386 		    m->m_data, m->m_len, m->m_flags);
5387 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
5388 	    i, i == 1 ? "" : "s");
5389 }
5390 
5391 /*
5392  * wm_82547_txfifo_stall:
5393  *
5394  *	Callout used to wait for the 82547 Tx FIFO to drain,
5395  *	reset the FIFO pointers, and restart packet transmission.
5396  */
5397 static void
5398 wm_82547_txfifo_stall(void *arg)
5399 {
5400 	struct wm_softc *sc = arg;
5401 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5402 
5403 	mutex_enter(txq->txq_lock);
5404 
5405 	if (txq->txq_stopping)
5406 		goto out;
5407 
5408 	if (txq->txq_fifo_stall) {
5409 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
5410 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
5411 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
5412 			/*
5413 			 * Packets have drained.  Stop transmitter, reset
5414 			 * FIFO pointers, restart transmitter, and kick
5415 			 * the packet queue.
5416 			 */
5417 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
5418 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
5419 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
5420 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
5421 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
5422 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
5423 			CSR_WRITE(sc, WMREG_TCTL, tctl);
5424 			CSR_WRITE_FLUSH(sc);
5425 
5426 			txq->txq_fifo_head = 0;
5427 			txq->txq_fifo_stall = 0;
5428 			wm_start_locked(&sc->sc_ethercom.ec_if);
5429 		} else {
5430 			/*
5431 			 * Still waiting for packets to drain; try again in
5432 			 * another tick.
5433 			 */
5434 			callout_schedule(&sc->sc_txfifo_ch, 1);
5435 		}
5436 	}
5437 
5438 out:
5439 	mutex_exit(txq->txq_lock);
5440 }
5441 
5442 /*
5443  * wm_82547_txfifo_bugchk:
5444  *
5445  *	Check for bug condition in the 82547 Tx FIFO.  We need to
5446  *	prevent enqueueing a packet that would wrap around the end
5447  *	if the Tx FIFO ring buffer, otherwise the chip will croak.
5448  *
5449  *	We do this by checking the amount of space before the end
5450  *	of the Tx FIFO buffer.  If the packet will not fit, we "stall"
5451  *	the Tx FIFO, wait for all remaining packets to drain, reset
5452  *	the internal FIFO pointers to the beginning, and restart
5453  *	transmission on the interface.
5454  */
5455 #define	WM_FIFO_HDR		0x10
5456 #define	WM_82547_PAD_LEN	0x3e0
5457 static int
5458 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
5459 {
5460 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
5461 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
5462 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
5463 
5464 	/* Just return if already stalled. */
5465 	if (txq->txq_fifo_stall)
5466 		return 1;
5467 
5468 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
5469 		/* Stall only occurs in half-duplex mode. */
5470 		goto send_packet;
5471 	}
5472 
5473 	if (len >= WM_82547_PAD_LEN + space) {
5474 		txq->txq_fifo_stall = 1;
5475 		callout_schedule(&sc->sc_txfifo_ch, 1);
5476 		return 1;
5477 	}
5478 
5479  send_packet:
5480 	txq->txq_fifo_head += len;
5481 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
5482 		txq->txq_fifo_head -= txq->txq_fifo_size;
5483 
5484 	return 0;
5485 }
5486 
5487 static int
5488 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5489 {
5490 	int error;
5491 
5492 	/*
5493 	 * Allocate the control data structures, and create and load the
5494 	 * DMA map for it.
5495 	 *
5496 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5497 	 * memory.  So must Rx descriptors.  We simplify by allocating
5498 	 * both sets within the same 4G segment.
5499 	 */
5500 	if (sc->sc_type < WM_T_82544)
5501 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
5502 	else
5503 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
5504 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5505 		txq->txq_descsize = sizeof(nq_txdesc_t);
5506 	else
5507 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
5508 
5509 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
5510 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
5511 		    1, &txq->txq_desc_rseg, 0)) != 0) {
5512 		aprint_error_dev(sc->sc_dev,
5513 		    "unable to allocate TX control data, error = %d\n",
5514 		    error);
5515 		goto fail_0;
5516 	}
5517 
5518 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
5519 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
5520 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
5521 		aprint_error_dev(sc->sc_dev,
5522 		    "unable to map TX control data, error = %d\n", error);
5523 		goto fail_1;
5524 	}
5525 
5526 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
5527 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
5528 		aprint_error_dev(sc->sc_dev,
5529 		    "unable to create TX control data DMA map, error = %d\n",
5530 		    error);
5531 		goto fail_2;
5532 	}
5533 
5534 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
5535 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
5536 		aprint_error_dev(sc->sc_dev,
5537 		    "unable to load TX control data DMA map, error = %d\n",
5538 		    error);
5539 		goto fail_3;
5540 	}
5541 
5542 	return 0;
5543 
5544  fail_3:
5545 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5546  fail_2:
5547 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5548 	    WM_TXDESCS_SIZE(txq));
5549  fail_1:
5550 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5551  fail_0:
5552 	return error;
5553 }
5554 
5555 static void
5556 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
5557 {
5558 
5559 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
5560 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
5561 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
5562 	    WM_TXDESCS_SIZE(txq));
5563 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
5564 }
5565 
5566 static int
5567 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5568 {
5569 	int error;
5570 
5571 	/*
5572 	 * Allocate the control data structures, and create and load the
5573 	 * DMA map for it.
5574 	 *
5575 	 * NOTE: All Tx descriptors must be in the same 4G segment of
5576 	 * memory.  So must Rx descriptors.  We simplify by allocating
5577 	 * both sets within the same 4G segment.
5578 	 */
5579 	rxq->rxq_desc_size = sizeof(wiseman_rxdesc_t) * WM_NRXDESC;
5580 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq->rxq_desc_size,
5581 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
5582 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
5583 		aprint_error_dev(sc->sc_dev,
5584 		    "unable to allocate RX control data, error = %d\n",
5585 		    error);
5586 		goto fail_0;
5587 	}
5588 
5589 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
5590 		    rxq->rxq_desc_rseg, rxq->rxq_desc_size,
5591 		    (void **)&rxq->rxq_descs, BUS_DMA_COHERENT)) != 0) {
5592 		aprint_error_dev(sc->sc_dev,
5593 		    "unable to map RX control data, error = %d\n", error);
5594 		goto fail_1;
5595 	}
5596 
5597 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq->rxq_desc_size, 1,
5598 		    rxq->rxq_desc_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
5599 		aprint_error_dev(sc->sc_dev,
5600 		    "unable to create RX control data DMA map, error = %d\n",
5601 		    error);
5602 		goto fail_2;
5603 	}
5604 
5605 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
5606 		    rxq->rxq_descs, rxq->rxq_desc_size, NULL, 0)) != 0) {
5607 		aprint_error_dev(sc->sc_dev,
5608 		    "unable to load RX control data DMA map, error = %d\n",
5609 		    error);
5610 		goto fail_3;
5611 	}
5612 
5613 	return 0;
5614 
5615  fail_3:
5616 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5617  fail_2:
5618 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5619 	    rxq->rxq_desc_size);
5620  fail_1:
5621 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5622  fail_0:
5623 	return error;
5624 }
5625 
5626 static void
5627 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
5628 {
5629 
5630 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
5631 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
5632 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs,
5633 	    rxq->rxq_desc_size);
5634 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
5635 }
5636 
5637 
5638 static int
5639 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5640 {
5641 	int i, error;
5642 
5643 	/* Create the transmit buffer DMA maps. */
5644 	WM_TXQUEUELEN(txq) =
5645 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
5646 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
5647 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5648 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
5649 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
5650 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
5651 			aprint_error_dev(sc->sc_dev,
5652 			    "unable to create Tx DMA map %d, error = %d\n",
5653 			    i, error);
5654 			goto fail;
5655 		}
5656 	}
5657 
5658 	return 0;
5659 
5660  fail:
5661 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5662 		if (txq->txq_soft[i].txs_dmamap != NULL)
5663 			bus_dmamap_destroy(sc->sc_dmat,
5664 			    txq->txq_soft[i].txs_dmamap);
5665 	}
5666 	return error;
5667 }
5668 
5669 static void
5670 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
5671 {
5672 	int i;
5673 
5674 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
5675 		if (txq->txq_soft[i].txs_dmamap != NULL)
5676 			bus_dmamap_destroy(sc->sc_dmat,
5677 			    txq->txq_soft[i].txs_dmamap);
5678 	}
5679 }
5680 
5681 static int
5682 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5683 {
5684 	int i, error;
5685 
5686 	/* Create the receive buffer DMA maps. */
5687 	for (i = 0; i < WM_NRXDESC; i++) {
5688 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
5689 			    MCLBYTES, 0, 0,
5690 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
5691 			aprint_error_dev(sc->sc_dev,
5692 			    "unable to create Rx DMA map %d error = %d\n",
5693 			    i, error);
5694 			goto fail;
5695 		}
5696 		rxq->rxq_soft[i].rxs_mbuf = NULL;
5697 	}
5698 
5699 	return 0;
5700 
5701  fail:
5702 	for (i = 0; i < WM_NRXDESC; i++) {
5703 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5704 			bus_dmamap_destroy(sc->sc_dmat,
5705 			    rxq->rxq_soft[i].rxs_dmamap);
5706 	}
5707 	return error;
5708 }
5709 
5710 static void
5711 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
5712 {
5713 	int i;
5714 
5715 	for (i = 0; i < WM_NRXDESC; i++) {
5716 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
5717 			bus_dmamap_destroy(sc->sc_dmat,
5718 			    rxq->rxq_soft[i].rxs_dmamap);
5719 	}
5720 }
5721 
5722 /*
5723  * wm_alloc_quques:
5724  *	Allocate {tx,rx}descs and {tx,rx} buffers
5725  */
5726 static int
5727 wm_alloc_txrx_queues(struct wm_softc *sc)
5728 {
5729 	int i, error, tx_done, rx_done;
5730 
5731 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
5732 	    KM_SLEEP);
5733 	if (sc->sc_queue == NULL) {
5734 		aprint_error_dev(sc->sc_dev,"unable to allocate wm_queue\n");
5735 		error = ENOMEM;
5736 		goto fail_0;
5737 	}
5738 
5739 	/*
5740 	 * For transmission
5741 	 */
5742 	error = 0;
5743 	tx_done = 0;
5744 	for (i = 0; i < sc->sc_nqueues; i++) {
5745 #ifdef WM_EVENT_COUNTERS
5746 		int j;
5747 		const char *xname;
5748 #endif
5749 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5750 		txq->txq_sc = sc;
5751 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5752 
5753 		error = wm_alloc_tx_descs(sc, txq);
5754 		if (error)
5755 			break;
5756 		error = wm_alloc_tx_buffer(sc, txq);
5757 		if (error) {
5758 			wm_free_tx_descs(sc, txq);
5759 			break;
5760 		}
5761 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
5762 		if (txq->txq_interq == NULL) {
5763 			wm_free_tx_descs(sc, txq);
5764 			wm_free_tx_buffer(sc, txq);
5765 			error = ENOMEM;
5766 			break;
5767 		}
5768 
5769 #ifdef WM_EVENT_COUNTERS
5770 		xname = device_xname(sc->sc_dev);
5771 
5772 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
5773 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
5774 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
5775 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
5776 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
5777 
5778 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
5779 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
5780 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
5781 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
5782 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
5783 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
5784 
5785 		for (j = 0; j < WM_NTXSEGS; j++) {
5786 			snprintf(txq->txq_txseg_evcnt_names[j],
5787 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
5788 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
5789 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
5790 		}
5791 
5792 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
5793 
5794 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
5795 #endif /* WM_EVENT_COUNTERS */
5796 
5797 		tx_done++;
5798 	}
5799 	if (error)
5800 		goto fail_1;
5801 
5802 	/*
5803 	 * For recieve
5804 	 */
5805 	error = 0;
5806 	rx_done = 0;
5807 	for (i = 0; i < sc->sc_nqueues; i++) {
5808 #ifdef WM_EVENT_COUNTERS
5809 		const char *xname;
5810 #endif
5811 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5812 		rxq->rxq_sc = sc;
5813 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
5814 
5815 		error = wm_alloc_rx_descs(sc, rxq);
5816 		if (error)
5817 			break;
5818 
5819 		error = wm_alloc_rx_buffer(sc, rxq);
5820 		if (error) {
5821 			wm_free_rx_descs(sc, rxq);
5822 			break;
5823 		}
5824 
5825 #ifdef WM_EVENT_COUNTERS
5826 		xname = device_xname(sc->sc_dev);
5827 
5828 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
5829 
5830 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
5831 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
5832 #endif /* WM_EVENT_COUNTERS */
5833 
5834 		rx_done++;
5835 	}
5836 	if (error)
5837 		goto fail_2;
5838 
5839 	return 0;
5840 
5841  fail_2:
5842 	for (i = 0; i < rx_done; i++) {
5843 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5844 		wm_free_rx_buffer(sc, rxq);
5845 		wm_free_rx_descs(sc, rxq);
5846 		if (rxq->rxq_lock)
5847 			mutex_obj_free(rxq->rxq_lock);
5848 	}
5849  fail_1:
5850 	for (i = 0; i < tx_done; i++) {
5851 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5852 		pcq_destroy(txq->txq_interq);
5853 		wm_free_tx_buffer(sc, txq);
5854 		wm_free_tx_descs(sc, txq);
5855 		if (txq->txq_lock)
5856 			mutex_obj_free(txq->txq_lock);
5857 	}
5858 
5859 	kmem_free(sc->sc_queue,
5860 	    sizeof(struct wm_queue) * sc->sc_nqueues);
5861  fail_0:
5862 	return error;
5863 }
5864 
5865 /*
5866  * wm_free_quques:
5867  *	Free {tx,rx}descs and {tx,rx} buffers
5868  */
5869 static void
5870 wm_free_txrx_queues(struct wm_softc *sc)
5871 {
5872 	int i;
5873 
5874 	for (i = 0; i < sc->sc_nqueues; i++) {
5875 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5876 		wm_free_rx_buffer(sc, rxq);
5877 		wm_free_rx_descs(sc, rxq);
5878 		if (rxq->rxq_lock)
5879 			mutex_obj_free(rxq->rxq_lock);
5880 	}
5881 
5882 	for (i = 0; i < sc->sc_nqueues; i++) {
5883 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5884 		wm_free_tx_buffer(sc, txq);
5885 		wm_free_tx_descs(sc, txq);
5886 		if (txq->txq_lock)
5887 			mutex_obj_free(txq->txq_lock);
5888 	}
5889 
5890 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
5891 }
5892 
5893 static void
5894 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5895 {
5896 
5897 	KASSERT(mutex_owned(txq->txq_lock));
5898 
5899 	/* Initialize the transmit descriptor ring. */
5900 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
5901 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
5902 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
5903 	txq->txq_free = WM_NTXDESC(txq);
5904 	txq->txq_next = 0;
5905 }
5906 
5907 static void
5908 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5909     struct wm_txqueue *txq)
5910 {
5911 
5912 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5913 		device_xname(sc->sc_dev), __func__));
5914 	KASSERT(mutex_owned(txq->txq_lock));
5915 
5916 	if (sc->sc_type < WM_T_82543) {
5917 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
5918 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
5919 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
5920 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
5921 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
5922 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
5923 	} else {
5924 		int qid = wmq->wmq_id;
5925 
5926 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
5927 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
5928 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
5929 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
5930 
5931 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5932 			/*
5933 			 * Don't write TDT before TCTL.EN is set.
5934 			 * See the document.
5935 			 */
5936 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
5937 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
5938 			    | TXDCTL_WTHRESH(0));
5939 		else {
5940 			/* ITR / 4 */
5941 			CSR_WRITE(sc, WMREG_TIDV, sc->sc_itr / 4);
5942 			if (sc->sc_type >= WM_T_82540) {
5943 				/* should be same */
5944 				CSR_WRITE(sc, WMREG_TADV, sc->sc_itr / 4);
5945 			}
5946 
5947 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
5948 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
5949 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
5950 		}
5951 	}
5952 }
5953 
5954 static void
5955 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
5956 {
5957 	int i;
5958 
5959 	KASSERT(mutex_owned(txq->txq_lock));
5960 
5961 	/* Initialize the transmit job descriptors. */
5962 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
5963 		txq->txq_soft[i].txs_mbuf = NULL;
5964 	txq->txq_sfree = WM_TXQUEUELEN(txq);
5965 	txq->txq_snext = 0;
5966 	txq->txq_sdirty = 0;
5967 }
5968 
5969 static void
5970 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
5971     struct wm_txqueue *txq)
5972 {
5973 
5974 	KASSERT(mutex_owned(txq->txq_lock));
5975 
5976 	/*
5977 	 * Set up some register offsets that are different between
5978 	 * the i82542 and the i82543 and later chips.
5979 	 */
5980 	if (sc->sc_type < WM_T_82543)
5981 		txq->txq_tdt_reg = WMREG_OLD_TDT;
5982 	else
5983 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
5984 
5985 	wm_init_tx_descs(sc, txq);
5986 	wm_init_tx_regs(sc, wmq, txq);
5987 	wm_init_tx_buffer(sc, txq);
5988 }
5989 
5990 static void
5991 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
5992     struct wm_rxqueue *rxq)
5993 {
5994 
5995 	KASSERT(mutex_owned(rxq->rxq_lock));
5996 
5997 	/*
5998 	 * Initialize the receive descriptor and receive job
5999 	 * descriptor rings.
6000 	 */
6001 	if (sc->sc_type < WM_T_82543) {
6002 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6003 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6004 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6005 		    sizeof(wiseman_rxdesc_t) * WM_NRXDESC);
6006 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6007 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6008 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6009 
6010 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6011 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6012 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6013 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6014 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6015 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6016 	} else {
6017 		int qid = wmq->wmq_id;
6018 
6019 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6020 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6021 		CSR_WRITE(sc, WMREG_RDLEN(qid), rxq->rxq_desc_size);
6022 
6023 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6024 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6025 				panic("%s: MCLBYTES %d unsupported for i2575 or higher\n", __func__, MCLBYTES);
6026 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_LEGACY
6027 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
6028 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6029 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6030 			    | RXDCTL_WTHRESH(1));
6031 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6032 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6033 		} else {
6034 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6035 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6036 			/* ITR / 4 */
6037 			CSR_WRITE(sc, WMREG_RDTR, (sc->sc_itr / 4) | RDTR_FPD);
6038 			/* MUST be same */
6039 			CSR_WRITE(sc, WMREG_RADV, sc->sc_itr / 4);
6040 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6041 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6042 		}
6043 	}
6044 }
6045 
6046 static int
6047 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6048 {
6049 	struct wm_rxsoft *rxs;
6050 	int error, i;
6051 
6052 	KASSERT(mutex_owned(rxq->rxq_lock));
6053 
6054 	for (i = 0; i < WM_NRXDESC; i++) {
6055 		rxs = &rxq->rxq_soft[i];
6056 		if (rxs->rxs_mbuf == NULL) {
6057 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6058 				log(LOG_ERR, "%s: unable to allocate or map "
6059 				    "rx buffer %d, error = %d\n",
6060 				    device_xname(sc->sc_dev), i, error);
6061 				/*
6062 				 * XXX Should attempt to run with fewer receive
6063 				 * XXX buffers instead of just failing.
6064 				 */
6065 				wm_rxdrain(rxq);
6066 				return ENOMEM;
6067 			}
6068 		} else {
6069 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6070 				wm_init_rxdesc(rxq, i);
6071 			/*
6072 			 * For 82575 and newer device, the RX descriptors
6073 			 * must be initialized after the setting of RCTL.EN in
6074 			 * wm_set_filter()
6075 			 */
6076 		}
6077 	}
6078 	rxq->rxq_ptr = 0;
6079 	rxq->rxq_discard = 0;
6080 	WM_RXCHAIN_RESET(rxq);
6081 
6082 	return 0;
6083 }
6084 
6085 static int
6086 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6087     struct wm_rxqueue *rxq)
6088 {
6089 
6090 	KASSERT(mutex_owned(rxq->rxq_lock));
6091 
6092 	/*
6093 	 * Set up some register offsets that are different between
6094 	 * the i82542 and the i82543 and later chips.
6095 	 */
6096 	if (sc->sc_type < WM_T_82543)
6097 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6098 	else
6099 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6100 
6101 	wm_init_rx_regs(sc, wmq, rxq);
6102 	return wm_init_rx_buffer(sc, rxq);
6103 }
6104 
6105 /*
6106  * wm_init_quques:
6107  *	Initialize {tx,rx}descs and {tx,rx} buffers
6108  */
6109 static int
6110 wm_init_txrx_queues(struct wm_softc *sc)
6111 {
6112 	int i, error = 0;
6113 
6114 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6115 		device_xname(sc->sc_dev), __func__));
6116 
6117 	for (i = 0; i < sc->sc_nqueues; i++) {
6118 		struct wm_queue *wmq = &sc->sc_queue[i];
6119 		struct wm_txqueue *txq = &wmq->wmq_txq;
6120 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6121 
6122 		mutex_enter(txq->txq_lock);
6123 		wm_init_tx_queue(sc, wmq, txq);
6124 		mutex_exit(txq->txq_lock);
6125 
6126 		mutex_enter(rxq->rxq_lock);
6127 		error = wm_init_rx_queue(sc, wmq, rxq);
6128 		mutex_exit(rxq->rxq_lock);
6129 		if (error)
6130 			break;
6131 	}
6132 
6133 	return error;
6134 }
6135 
6136 /*
6137  * wm_tx_offload:
6138  *
6139  *	Set up TCP/IP checksumming parameters for the
6140  *	specified packet.
6141  */
6142 static int
6143 wm_tx_offload(struct wm_softc *sc, struct wm_txsoft *txs, uint32_t *cmdp,
6144     uint8_t *fieldsp)
6145 {
6146 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6147 	struct mbuf *m0 = txs->txs_mbuf;
6148 	struct livengood_tcpip_ctxdesc *t;
6149 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6150 	uint32_t ipcse;
6151 	struct ether_header *eh;
6152 	int offset, iphl;
6153 	uint8_t fields;
6154 
6155 	/*
6156 	 * XXX It would be nice if the mbuf pkthdr had offset
6157 	 * fields for the protocol headers.
6158 	 */
6159 
6160 	eh = mtod(m0, struct ether_header *);
6161 	switch (htons(eh->ether_type)) {
6162 	case ETHERTYPE_IP:
6163 	case ETHERTYPE_IPV6:
6164 		offset = ETHER_HDR_LEN;
6165 		break;
6166 
6167 	case ETHERTYPE_VLAN:
6168 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6169 		break;
6170 
6171 	default:
6172 		/*
6173 		 * Don't support this protocol or encapsulation.
6174 		 */
6175 		*fieldsp = 0;
6176 		*cmdp = 0;
6177 		return 0;
6178 	}
6179 
6180 	if ((m0->m_pkthdr.csum_flags &
6181 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4)) != 0) {
6182 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6183 	} else {
6184 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6185 	}
6186 	ipcse = offset + iphl - 1;
6187 
6188 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6189 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6190 	seg = 0;
6191 	fields = 0;
6192 
6193 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6194 		int hlen = offset + iphl;
6195 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6196 
6197 		if (__predict_false(m0->m_len <
6198 				    (hlen + sizeof(struct tcphdr)))) {
6199 			/*
6200 			 * TCP/IP headers are not in the first mbuf; we need
6201 			 * to do this the slow and painful way.  Let's just
6202 			 * hope this doesn't happen very often.
6203 			 */
6204 			struct tcphdr th;
6205 
6206 			WM_Q_EVCNT_INCR(txq, txtsopain);
6207 
6208 			m_copydata(m0, hlen, sizeof(th), &th);
6209 			if (v4) {
6210 				struct ip ip;
6211 
6212 				m_copydata(m0, offset, sizeof(ip), &ip);
6213 				ip.ip_len = 0;
6214 				m_copyback(m0,
6215 				    offset + offsetof(struct ip, ip_len),
6216 				    sizeof(ip.ip_len), &ip.ip_len);
6217 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6218 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6219 			} else {
6220 				struct ip6_hdr ip6;
6221 
6222 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6223 				ip6.ip6_plen = 0;
6224 				m_copyback(m0,
6225 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6226 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6227 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6228 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6229 			}
6230 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6231 			    sizeof(th.th_sum), &th.th_sum);
6232 
6233 			hlen += th.th_off << 2;
6234 		} else {
6235 			/*
6236 			 * TCP/IP headers are in the first mbuf; we can do
6237 			 * this the easy way.
6238 			 */
6239 			struct tcphdr *th;
6240 
6241 			if (v4) {
6242 				struct ip *ip =
6243 				    (void *)(mtod(m0, char *) + offset);
6244 				th = (void *)(mtod(m0, char *) + hlen);
6245 
6246 				ip->ip_len = 0;
6247 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6248 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6249 			} else {
6250 				struct ip6_hdr *ip6 =
6251 				    (void *)(mtod(m0, char *) + offset);
6252 				th = (void *)(mtod(m0, char *) + hlen);
6253 
6254 				ip6->ip6_plen = 0;
6255 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6256 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6257 			}
6258 			hlen += th->th_off << 2;
6259 		}
6260 
6261 		if (v4) {
6262 			WM_Q_EVCNT_INCR(txq, txtso);
6263 			cmdlen |= WTX_TCPIP_CMD_IP;
6264 		} else {
6265 			WM_Q_EVCNT_INCR(txq, txtso6);
6266 			ipcse = 0;
6267 		}
6268 		cmd |= WTX_TCPIP_CMD_TSE;
6269 		cmdlen |= WTX_TCPIP_CMD_TSE |
6270 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
6271 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
6272 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
6273 	}
6274 
6275 	/*
6276 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
6277 	 * offload feature, if we load the context descriptor, we
6278 	 * MUST provide valid values for IPCSS and TUCSS fields.
6279 	 */
6280 
6281 	ipcs = WTX_TCPIP_IPCSS(offset) |
6282 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
6283 	    WTX_TCPIP_IPCSE(ipcse);
6284 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
6285 		WM_Q_EVCNT_INCR(txq, txipsum);
6286 		fields |= WTX_IXSM;
6287 	}
6288 
6289 	offset += iphl;
6290 
6291 	if (m0->m_pkthdr.csum_flags &
6292 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
6293 		WM_Q_EVCNT_INCR(txq, txtusum);
6294 		fields |= WTX_TXSM;
6295 		tucs = WTX_TCPIP_TUCSS(offset) |
6296 		    WTX_TCPIP_TUCSO(offset +
6297 		    M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
6298 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6299 	} else if ((m0->m_pkthdr.csum_flags &
6300 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
6301 		WM_Q_EVCNT_INCR(txq, txtusum6);
6302 		fields |= WTX_TXSM;
6303 		tucs = WTX_TCPIP_TUCSS(offset) |
6304 		    WTX_TCPIP_TUCSO(offset +
6305 		    M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
6306 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6307 	} else {
6308 		/* Just initialize it to a valid TCP context. */
6309 		tucs = WTX_TCPIP_TUCSS(offset) |
6310 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
6311 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
6312 	}
6313 
6314 	/* Fill in the context descriptor. */
6315 	t = (struct livengood_tcpip_ctxdesc *)
6316 	    &txq->txq_descs[txq->txq_next];
6317 	t->tcpip_ipcs = htole32(ipcs);
6318 	t->tcpip_tucs = htole32(tucs);
6319 	t->tcpip_cmdlen = htole32(cmdlen);
6320 	t->tcpip_seg = htole32(seg);
6321 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6322 
6323 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6324 	txs->txs_ndesc++;
6325 
6326 	*cmdp = cmd;
6327 	*fieldsp = fields;
6328 
6329 	return 0;
6330 }
6331 
6332 static inline int
6333 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
6334 {
6335 	struct wm_softc *sc = ifp->if_softc;
6336 	u_int cpuid = cpu_index(curcpu());
6337 
6338 	/*
6339 	 * Currently, simple distribute strategy.
6340 	 * TODO:
6341 	 * distribute by flowid(RSS has value).
6342 	 */
6343 	return (cpuid + sc->sc_affinity_offset) % sc->sc_nqueues;
6344 }
6345 
6346 /*
6347  * wm_start:		[ifnet interface function]
6348  *
6349  *	Start packet transmission on the interface.
6350  */
6351 static void
6352 wm_start(struct ifnet *ifp)
6353 {
6354 	struct wm_softc *sc = ifp->if_softc;
6355 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6356 
6357 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6358 
6359 	/*
6360 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6361 	 */
6362 
6363 	mutex_enter(txq->txq_lock);
6364 	if (!txq->txq_stopping)
6365 		wm_start_locked(ifp);
6366 	mutex_exit(txq->txq_lock);
6367 }
6368 
6369 static void
6370 wm_start_locked(struct ifnet *ifp)
6371 {
6372 	struct wm_softc *sc = ifp->if_softc;
6373 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6374 
6375 	wm_send_common_locked(ifp, txq, false);
6376 }
6377 
6378 static int
6379 wm_transmit(struct ifnet *ifp, struct mbuf *m)
6380 {
6381 	int qid;
6382 	struct wm_softc *sc = ifp->if_softc;
6383 	struct wm_txqueue *txq;
6384 
6385 	qid = wm_select_txqueue(ifp, m);
6386 	txq = &sc->sc_queue[qid].wmq_txq;
6387 
6388 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6389 		m_freem(m);
6390 		WM_Q_EVCNT_INCR(txq, txdrop);
6391 		return ENOBUFS;
6392 	}
6393 
6394 	/*
6395 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6396 	 */
6397 	ifp->if_obytes += m->m_pkthdr.len;
6398 	if (m->m_flags & M_MCAST)
6399 		ifp->if_omcasts++;
6400 
6401 	if (mutex_tryenter(txq->txq_lock)) {
6402 		if (!txq->txq_stopping)
6403 			wm_transmit_locked(ifp, txq);
6404 		mutex_exit(txq->txq_lock);
6405 	}
6406 
6407 	return 0;
6408 }
6409 
6410 static void
6411 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6412 {
6413 
6414 	wm_send_common_locked(ifp, txq, true);
6415 }
6416 
6417 static void
6418 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6419     bool is_transmit)
6420 {
6421 	struct wm_softc *sc = ifp->if_softc;
6422 	struct mbuf *m0;
6423 	struct m_tag *mtag;
6424 	struct wm_txsoft *txs;
6425 	bus_dmamap_t dmamap;
6426 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
6427 	bus_addr_t curaddr;
6428 	bus_size_t seglen, curlen;
6429 	uint32_t cksumcmd;
6430 	uint8_t cksumfields;
6431 
6432 	KASSERT(mutex_owned(txq->txq_lock));
6433 
6434 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6435 		return;
6436 
6437 	/* Remember the previous number of free descriptors. */
6438 	ofree = txq->txq_free;
6439 
6440 	/*
6441 	 * Loop through the send queue, setting up transmit descriptors
6442 	 * until we drain the queue, or use up all available transmit
6443 	 * descriptors.
6444 	 */
6445 	for (;;) {
6446 		m0 = NULL;
6447 
6448 		/* Get a work queue entry. */
6449 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
6450 			wm_txeof(sc, txq);
6451 			if (txq->txq_sfree == 0) {
6452 				DPRINTF(WM_DEBUG_TX,
6453 				    ("%s: TX: no free job descriptors\n",
6454 					device_xname(sc->sc_dev)));
6455 				WM_Q_EVCNT_INCR(txq, txsstall);
6456 				break;
6457 			}
6458 		}
6459 
6460 		/* Grab a packet off the queue. */
6461 		if (is_transmit)
6462 			m0 = pcq_get(txq->txq_interq);
6463 		else
6464 			IFQ_DEQUEUE(&ifp->if_snd, m0);
6465 		if (m0 == NULL)
6466 			break;
6467 
6468 		DPRINTF(WM_DEBUG_TX,
6469 		    ("%s: TX: have packet to transmit: %p\n",
6470 		    device_xname(sc->sc_dev), m0));
6471 
6472 		txs = &txq->txq_soft[txq->txq_snext];
6473 		dmamap = txs->txs_dmamap;
6474 
6475 		use_tso = (m0->m_pkthdr.csum_flags &
6476 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
6477 
6478 		/*
6479 		 * So says the Linux driver:
6480 		 * The controller does a simple calculation to make sure
6481 		 * there is enough room in the FIFO before initiating the
6482 		 * DMA for each buffer.  The calc is:
6483 		 *	4 = ceil(buffer len / MSS)
6484 		 * To make sure we don't overrun the FIFO, adjust the max
6485 		 * buffer len if the MSS drops.
6486 		 */
6487 		dmamap->dm_maxsegsz =
6488 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
6489 		    ? m0->m_pkthdr.segsz << 2
6490 		    : WTX_MAX_LEN;
6491 
6492 		/*
6493 		 * Load the DMA map.  If this fails, the packet either
6494 		 * didn't fit in the allotted number of segments, or we
6495 		 * were short on resources.  For the too-many-segments
6496 		 * case, we simply report an error and drop the packet,
6497 		 * since we can't sanely copy a jumbo packet to a single
6498 		 * buffer.
6499 		 */
6500 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
6501 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
6502 		if (error) {
6503 			if (error == EFBIG) {
6504 				WM_Q_EVCNT_INCR(txq, txdrop);
6505 				log(LOG_ERR, "%s: Tx packet consumes too many "
6506 				    "DMA segments, dropping...\n",
6507 				    device_xname(sc->sc_dev));
6508 				wm_dump_mbuf_chain(sc, m0);
6509 				m_freem(m0);
6510 				continue;
6511 			}
6512 			/*  Short on resources, just stop for now. */
6513 			DPRINTF(WM_DEBUG_TX,
6514 			    ("%s: TX: dmamap load failed: %d\n",
6515 			    device_xname(sc->sc_dev), error));
6516 			break;
6517 		}
6518 
6519 		segs_needed = dmamap->dm_nsegs;
6520 		if (use_tso) {
6521 			/* For sentinel descriptor; see below. */
6522 			segs_needed++;
6523 		}
6524 
6525 		/*
6526 		 * Ensure we have enough descriptors free to describe
6527 		 * the packet.  Note, we always reserve one descriptor
6528 		 * at the end of the ring due to the semantics of the
6529 		 * TDT register, plus one more in the event we need
6530 		 * to load offload context.
6531 		 */
6532 		if (segs_needed > txq->txq_free - 2) {
6533 			/*
6534 			 * Not enough free descriptors to transmit this
6535 			 * packet.  We haven't committed anything yet,
6536 			 * so just unload the DMA map, put the packet
6537 			 * pack on the queue, and punt.  Notify the upper
6538 			 * layer that there are no more slots left.
6539 			 */
6540 			DPRINTF(WM_DEBUG_TX,
6541 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
6542 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
6543 			    segs_needed, txq->txq_free - 1));
6544 			ifp->if_flags |= IFF_OACTIVE;
6545 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6546 			WM_Q_EVCNT_INCR(txq, txdstall);
6547 			break;
6548 		}
6549 
6550 		/*
6551 		 * Check for 82547 Tx FIFO bug.  We need to do this
6552 		 * once we know we can transmit the packet, since we
6553 		 * do some internal FIFO space accounting here.
6554 		 */
6555 		if (sc->sc_type == WM_T_82547 &&
6556 		    wm_82547_txfifo_bugchk(sc, m0)) {
6557 			DPRINTF(WM_DEBUG_TX,
6558 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
6559 			    device_xname(sc->sc_dev)));
6560 			ifp->if_flags |= IFF_OACTIVE;
6561 			bus_dmamap_unload(sc->sc_dmat, dmamap);
6562 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
6563 			break;
6564 		}
6565 
6566 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
6567 
6568 		DPRINTF(WM_DEBUG_TX,
6569 		    ("%s: TX: packet has %d (%d) DMA segments\n",
6570 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
6571 
6572 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
6573 
6574 		/*
6575 		 * Store a pointer to the packet so that we can free it
6576 		 * later.
6577 		 *
6578 		 * Initially, we consider the number of descriptors the
6579 		 * packet uses the number of DMA segments.  This may be
6580 		 * incremented by 1 if we do checksum offload (a descriptor
6581 		 * is used to set the checksum context).
6582 		 */
6583 		txs->txs_mbuf = m0;
6584 		txs->txs_firstdesc = txq->txq_next;
6585 		txs->txs_ndesc = segs_needed;
6586 
6587 		/* Set up offload parameters for this packet. */
6588 		if (m0->m_pkthdr.csum_flags &
6589 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
6590 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
6591 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
6592 			if (wm_tx_offload(sc, txs, &cksumcmd,
6593 					  &cksumfields) != 0) {
6594 				/* Error message already displayed. */
6595 				bus_dmamap_unload(sc->sc_dmat, dmamap);
6596 				continue;
6597 			}
6598 		} else {
6599 			cksumcmd = 0;
6600 			cksumfields = 0;
6601 		}
6602 
6603 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
6604 
6605 		/* Sync the DMA map. */
6606 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
6607 		    BUS_DMASYNC_PREWRITE);
6608 
6609 		/* Initialize the transmit descriptor. */
6610 		for (nexttx = txq->txq_next, seg = 0;
6611 		     seg < dmamap->dm_nsegs; seg++) {
6612 			for (seglen = dmamap->dm_segs[seg].ds_len,
6613 			     curaddr = dmamap->dm_segs[seg].ds_addr;
6614 			     seglen != 0;
6615 			     curaddr += curlen, seglen -= curlen,
6616 			     nexttx = WM_NEXTTX(txq, nexttx)) {
6617 				curlen = seglen;
6618 
6619 				/*
6620 				 * So says the Linux driver:
6621 				 * Work around for premature descriptor
6622 				 * write-backs in TSO mode.  Append a
6623 				 * 4-byte sentinel descriptor.
6624 				 */
6625 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
6626 				    curlen > 8)
6627 					curlen -= 4;
6628 
6629 				wm_set_dma_addr(
6630 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
6631 				txq->txq_descs[nexttx].wtx_cmdlen
6632 				    = htole32(cksumcmd | curlen);
6633 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
6634 				    = 0;
6635 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
6636 				    = cksumfields;
6637 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
6638 				lasttx = nexttx;
6639 
6640 				DPRINTF(WM_DEBUG_TX,
6641 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
6642 				     "len %#04zx\n",
6643 				    device_xname(sc->sc_dev), nexttx,
6644 				    (uint64_t)curaddr, curlen));
6645 			}
6646 		}
6647 
6648 		KASSERT(lasttx != -1);
6649 
6650 		/*
6651 		 * Set up the command byte on the last descriptor of
6652 		 * the packet.  If we're in the interrupt delay window,
6653 		 * delay the interrupt.
6654 		 */
6655 		txq->txq_descs[lasttx].wtx_cmdlen |=
6656 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
6657 
6658 		/*
6659 		 * If VLANs are enabled and the packet has a VLAN tag, set
6660 		 * up the descriptor to encapsulate the packet for us.
6661 		 *
6662 		 * This is only valid on the last descriptor of the packet.
6663 		 */
6664 		if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6665 			txq->txq_descs[lasttx].wtx_cmdlen |=
6666 			    htole32(WTX_CMD_VLE);
6667 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
6668 			    = htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
6669 		}
6670 
6671 		txs->txs_lastdesc = lasttx;
6672 
6673 		DPRINTF(WM_DEBUG_TX,
6674 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
6675 		    device_xname(sc->sc_dev),
6676 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
6677 
6678 		/* Sync the descriptors we're using. */
6679 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
6680 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6681 
6682 		/* Give the packet to the chip. */
6683 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
6684 
6685 		DPRINTF(WM_DEBUG_TX,
6686 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
6687 
6688 		DPRINTF(WM_DEBUG_TX,
6689 		    ("%s: TX: finished transmitting packet, job %d\n",
6690 		    device_xname(sc->sc_dev), txq->txq_snext));
6691 
6692 		/* Advance the tx pointer. */
6693 		txq->txq_free -= txs->txs_ndesc;
6694 		txq->txq_next = nexttx;
6695 
6696 		txq->txq_sfree--;
6697 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
6698 
6699 		/* Pass the packet to any BPF listeners. */
6700 		bpf_mtap(ifp, m0);
6701 	}
6702 
6703 	if (m0 != NULL) {
6704 		ifp->if_flags |= IFF_OACTIVE;
6705 		WM_Q_EVCNT_INCR(txq, txdrop);
6706 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
6707 			__func__));
6708 		m_freem(m0);
6709 	}
6710 
6711 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
6712 		/* No more slots; notify upper layer. */
6713 		ifp->if_flags |= IFF_OACTIVE;
6714 	}
6715 
6716 	if (txq->txq_free != ofree) {
6717 		/* Set a watchdog timer in case the chip flakes out. */
6718 		ifp->if_timer = 5;
6719 	}
6720 }
6721 
6722 /*
6723  * wm_nq_tx_offload:
6724  *
6725  *	Set up TCP/IP checksumming parameters for the
6726  *	specified packet, for NEWQUEUE devices
6727  */
6728 static int
6729 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6730     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
6731 {
6732 	struct mbuf *m0 = txs->txs_mbuf;
6733 	struct m_tag *mtag;
6734 	uint32_t vl_len, mssidx, cmdc;
6735 	struct ether_header *eh;
6736 	int offset, iphl;
6737 
6738 	/*
6739 	 * XXX It would be nice if the mbuf pkthdr had offset
6740 	 * fields for the protocol headers.
6741 	 */
6742 	*cmdlenp = 0;
6743 	*fieldsp = 0;
6744 
6745 	eh = mtod(m0, struct ether_header *);
6746 	switch (htons(eh->ether_type)) {
6747 	case ETHERTYPE_IP:
6748 	case ETHERTYPE_IPV6:
6749 		offset = ETHER_HDR_LEN;
6750 		break;
6751 
6752 	case ETHERTYPE_VLAN:
6753 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6754 		break;
6755 
6756 	default:
6757 		/* Don't support this protocol or encapsulation. */
6758 		*do_csum = false;
6759 		return 0;
6760 	}
6761 	*do_csum = true;
6762 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
6763 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
6764 
6765 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
6766 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
6767 
6768 	if ((m0->m_pkthdr.csum_flags &
6769 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6770 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6771 	} else {
6772 		iphl = M_CSUM_DATA_IPv6_HL(m0->m_pkthdr.csum_data);
6773 	}
6774 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
6775 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
6776 
6777 	if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) != NULL) {
6778 		vl_len |= ((VLAN_TAG_VALUE(mtag) & NQTXC_VLLEN_VLAN_MASK)
6779 		     << NQTXC_VLLEN_VLAN_SHIFT);
6780 		*cmdlenp |= NQTX_CMD_VLE;
6781 	}
6782 
6783 	mssidx = 0;
6784 
6785 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6786 		int hlen = offset + iphl;
6787 		int tcp_hlen;
6788 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6789 
6790 		if (__predict_false(m0->m_len <
6791 				    (hlen + sizeof(struct tcphdr)))) {
6792 			/*
6793 			 * TCP/IP headers are not in the first mbuf; we need
6794 			 * to do this the slow and painful way.  Let's just
6795 			 * hope this doesn't happen very often.
6796 			 */
6797 			struct tcphdr th;
6798 
6799 			WM_Q_EVCNT_INCR(txq, txtsopain);
6800 
6801 			m_copydata(m0, hlen, sizeof(th), &th);
6802 			if (v4) {
6803 				struct ip ip;
6804 
6805 				m_copydata(m0, offset, sizeof(ip), &ip);
6806 				ip.ip_len = 0;
6807 				m_copyback(m0,
6808 				    offset + offsetof(struct ip, ip_len),
6809 				    sizeof(ip.ip_len), &ip.ip_len);
6810 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6811 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6812 			} else {
6813 				struct ip6_hdr ip6;
6814 
6815 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6816 				ip6.ip6_plen = 0;
6817 				m_copyback(m0,
6818 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6819 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6820 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6821 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6822 			}
6823 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6824 			    sizeof(th.th_sum), &th.th_sum);
6825 
6826 			tcp_hlen = th.th_off << 2;
6827 		} else {
6828 			/*
6829 			 * TCP/IP headers are in the first mbuf; we can do
6830 			 * this the easy way.
6831 			 */
6832 			struct tcphdr *th;
6833 
6834 			if (v4) {
6835 				struct ip *ip =
6836 				    (void *)(mtod(m0, char *) + offset);
6837 				th = (void *)(mtod(m0, char *) + hlen);
6838 
6839 				ip->ip_len = 0;
6840 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6841 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6842 			} else {
6843 				struct ip6_hdr *ip6 =
6844 				    (void *)(mtod(m0, char *) + offset);
6845 				th = (void *)(mtod(m0, char *) + hlen);
6846 
6847 				ip6->ip6_plen = 0;
6848 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6849 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6850 			}
6851 			tcp_hlen = th->th_off << 2;
6852 		}
6853 		hlen += tcp_hlen;
6854 		*cmdlenp |= NQTX_CMD_TSE;
6855 
6856 		if (v4) {
6857 			WM_Q_EVCNT_INCR(txq, txtso);
6858 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
6859 		} else {
6860 			WM_Q_EVCNT_INCR(txq, txtso6);
6861 			*fieldsp |= NQTXD_FIELDS_TUXSM;
6862 		}
6863 		*fieldsp |= ((m0->m_pkthdr.len - hlen) << NQTXD_FIELDS_PAYLEN_SHIFT);
6864 		KASSERT(((m0->m_pkthdr.len - hlen) & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6865 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
6866 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
6867 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
6868 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
6869 	} else {
6870 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
6871 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
6872 	}
6873 
6874 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
6875 		*fieldsp |= NQTXD_FIELDS_IXSM;
6876 		cmdc |= NQTXC_CMD_IP4;
6877 	}
6878 
6879 	if (m0->m_pkthdr.csum_flags &
6880 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6881 		WM_Q_EVCNT_INCR(txq, txtusum);
6882 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
6883 			cmdc |= NQTXC_CMD_TCP;
6884 		} else {
6885 			cmdc |= NQTXC_CMD_UDP;
6886 		}
6887 		cmdc |= NQTXC_CMD_IP4;
6888 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6889 	}
6890 	if (m0->m_pkthdr.csum_flags &
6891 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6892 		WM_Q_EVCNT_INCR(txq, txtusum6);
6893 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
6894 			cmdc |= NQTXC_CMD_TCP;
6895 		} else {
6896 			cmdc |= NQTXC_CMD_UDP;
6897 		}
6898 		cmdc |= NQTXC_CMD_IP6;
6899 		*fieldsp |= NQTXD_FIELDS_TUXSM;
6900 	}
6901 
6902 	/* Fill in the context descriptor. */
6903 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
6904 	    htole32(vl_len);
6905 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
6906 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
6907 	    htole32(cmdc);
6908 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
6909 	    htole32(mssidx);
6910 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
6911 	DPRINTF(WM_DEBUG_TX,
6912 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
6913 	    txq->txq_next, 0, vl_len));
6914 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
6915 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
6916 	txs->txs_ndesc++;
6917 	return 0;
6918 }
6919 
6920 /*
6921  * wm_nq_start:		[ifnet interface function]
6922  *
6923  *	Start packet transmission on the interface for NEWQUEUE devices
6924  */
6925 static void
6926 wm_nq_start(struct ifnet *ifp)
6927 {
6928 	struct wm_softc *sc = ifp->if_softc;
6929 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6930 
6931 	KASSERT(ifp->if_extflags & IFEF_START_MPSAFE);
6932 
6933 	/*
6934 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
6935 	 */
6936 
6937 	mutex_enter(txq->txq_lock);
6938 	if (!txq->txq_stopping)
6939 		wm_nq_start_locked(ifp);
6940 	mutex_exit(txq->txq_lock);
6941 }
6942 
6943 static void
6944 wm_nq_start_locked(struct ifnet *ifp)
6945 {
6946 	struct wm_softc *sc = ifp->if_softc;
6947 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6948 
6949 	wm_nq_send_common_locked(ifp, txq, false);
6950 }
6951 
6952 static int
6953 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
6954 {
6955 	int qid;
6956 	struct wm_softc *sc = ifp->if_softc;
6957 	struct wm_txqueue *txq;
6958 
6959 	qid = wm_select_txqueue(ifp, m);
6960 	txq = &sc->sc_queue[qid].wmq_txq;
6961 
6962 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
6963 		m_freem(m);
6964 		WM_Q_EVCNT_INCR(txq, txdrop);
6965 		return ENOBUFS;
6966 	}
6967 
6968 	/*
6969 	 * XXXX NOMPSAFE: ifp->if_data should be percpu.
6970 	 */
6971 	ifp->if_obytes += m->m_pkthdr.len;
6972 	if (m->m_flags & M_MCAST)
6973 		ifp->if_omcasts++;
6974 
6975 	if (mutex_tryenter(txq->txq_lock)) {
6976 		if (!txq->txq_stopping)
6977 			wm_nq_transmit_locked(ifp, txq);
6978 		mutex_exit(txq->txq_lock);
6979 	}
6980 
6981 	return 0;
6982 }
6983 
6984 static void
6985 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
6986 {
6987 
6988 	wm_nq_send_common_locked(ifp, txq, true);
6989 }
6990 
6991 static void
6992 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
6993     bool is_transmit)
6994 {
6995 	struct wm_softc *sc = ifp->if_softc;
6996 	struct mbuf *m0;
6997 	struct m_tag *mtag;
6998 	struct wm_txsoft *txs;
6999 	bus_dmamap_t dmamap;
7000 	int error, nexttx, lasttx = -1, seg, segs_needed;
7001 	bool do_csum, sent;
7002 
7003 	KASSERT(mutex_owned(txq->txq_lock));
7004 
7005 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
7006 		return;
7007 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7008 		return;
7009 
7010 	sent = false;
7011 
7012 	/*
7013 	 * Loop through the send queue, setting up transmit descriptors
7014 	 * until we drain the queue, or use up all available transmit
7015 	 * descriptors.
7016 	 */
7017 	for (;;) {
7018 		m0 = NULL;
7019 
7020 		/* Get a work queue entry. */
7021 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7022 			wm_txeof(sc, txq);
7023 			if (txq->txq_sfree == 0) {
7024 				DPRINTF(WM_DEBUG_TX,
7025 				    ("%s: TX: no free job descriptors\n",
7026 					device_xname(sc->sc_dev)));
7027 				WM_Q_EVCNT_INCR(txq, txsstall);
7028 				break;
7029 			}
7030 		}
7031 
7032 		/* Grab a packet off the queue. */
7033 		if (is_transmit)
7034 			m0 = pcq_get(txq->txq_interq);
7035 		else
7036 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7037 		if (m0 == NULL)
7038 			break;
7039 
7040 		DPRINTF(WM_DEBUG_TX,
7041 		    ("%s: TX: have packet to transmit: %p\n",
7042 		    device_xname(sc->sc_dev), m0));
7043 
7044 		txs = &txq->txq_soft[txq->txq_snext];
7045 		dmamap = txs->txs_dmamap;
7046 
7047 		/*
7048 		 * Load the DMA map.  If this fails, the packet either
7049 		 * didn't fit in the allotted number of segments, or we
7050 		 * were short on resources.  For the too-many-segments
7051 		 * case, we simply report an error and drop the packet,
7052 		 * since we can't sanely copy a jumbo packet to a single
7053 		 * buffer.
7054 		 */
7055 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7056 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7057 		if (error) {
7058 			if (error == EFBIG) {
7059 				WM_Q_EVCNT_INCR(txq, txdrop);
7060 				log(LOG_ERR, "%s: Tx packet consumes too many "
7061 				    "DMA segments, dropping...\n",
7062 				    device_xname(sc->sc_dev));
7063 				wm_dump_mbuf_chain(sc, m0);
7064 				m_freem(m0);
7065 				continue;
7066 			}
7067 			/* Short on resources, just stop for now. */
7068 			DPRINTF(WM_DEBUG_TX,
7069 			    ("%s: TX: dmamap load failed: %d\n",
7070 			    device_xname(sc->sc_dev), error));
7071 			break;
7072 		}
7073 
7074 		segs_needed = dmamap->dm_nsegs;
7075 
7076 		/*
7077 		 * Ensure we have enough descriptors free to describe
7078 		 * the packet.  Note, we always reserve one descriptor
7079 		 * at the end of the ring due to the semantics of the
7080 		 * TDT register, plus one more in the event we need
7081 		 * to load offload context.
7082 		 */
7083 		if (segs_needed > txq->txq_free - 2) {
7084 			/*
7085 			 * Not enough free descriptors to transmit this
7086 			 * packet.  We haven't committed anything yet,
7087 			 * so just unload the DMA map, put the packet
7088 			 * pack on the queue, and punt.  Notify the upper
7089 			 * layer that there are no more slots left.
7090 			 */
7091 			DPRINTF(WM_DEBUG_TX,
7092 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7093 			    device_xname(sc->sc_dev), dmamap->dm_nsegs,
7094 			    segs_needed, txq->txq_free - 1));
7095 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7096 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7097 			WM_Q_EVCNT_INCR(txq, txdstall);
7098 			break;
7099 		}
7100 
7101 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7102 
7103 		DPRINTF(WM_DEBUG_TX,
7104 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7105 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7106 
7107 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7108 
7109 		/*
7110 		 * Store a pointer to the packet so that we can free it
7111 		 * later.
7112 		 *
7113 		 * Initially, we consider the number of descriptors the
7114 		 * packet uses the number of DMA segments.  This may be
7115 		 * incremented by 1 if we do checksum offload (a descriptor
7116 		 * is used to set the checksum context).
7117 		 */
7118 		txs->txs_mbuf = m0;
7119 		txs->txs_firstdesc = txq->txq_next;
7120 		txs->txs_ndesc = segs_needed;
7121 
7122 		/* Set up offload parameters for this packet. */
7123 		uint32_t cmdlen, fields, dcmdlen;
7124 		if (m0->m_pkthdr.csum_flags &
7125 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7126 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7127 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7128 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7129 			    &do_csum) != 0) {
7130 				/* Error message already displayed. */
7131 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7132 				continue;
7133 			}
7134 		} else {
7135 			do_csum = false;
7136 			cmdlen = 0;
7137 			fields = 0;
7138 		}
7139 
7140 		/* Sync the DMA map. */
7141 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7142 		    BUS_DMASYNC_PREWRITE);
7143 
7144 		/* Initialize the first transmit descriptor. */
7145 		nexttx = txq->txq_next;
7146 		if (!do_csum) {
7147 			/* setup a legacy descriptor */
7148 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7149 			    dmamap->dm_segs[0].ds_addr);
7150 			txq->txq_descs[nexttx].wtx_cmdlen =
7151 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7152 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7153 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7154 			if ((mtag = VLAN_OUTPUT_TAG(&sc->sc_ethercom, m0)) !=
7155 			    NULL) {
7156 				txq->txq_descs[nexttx].wtx_cmdlen |=
7157 				    htole32(WTX_CMD_VLE);
7158 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7159 				    htole16(VLAN_TAG_VALUE(mtag) & 0xffff);
7160 			} else {
7161 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =0;
7162 			}
7163 			dcmdlen = 0;
7164 		} else {
7165 			/* setup an advanced data descriptor */
7166 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7167 			    htole64(dmamap->dm_segs[0].ds_addr);
7168 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7169 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7170 			    htole32(dmamap->dm_segs[0].ds_len | cmdlen );
7171 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7172 			    htole32(fields);
7173 			DPRINTF(WM_DEBUG_TX,
7174 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7175 			    device_xname(sc->sc_dev), nexttx,
7176 			    (uint64_t)dmamap->dm_segs[0].ds_addr));
7177 			DPRINTF(WM_DEBUG_TX,
7178 			    ("\t 0x%08x%08x\n", fields,
7179 			    (uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7180 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7181 		}
7182 
7183 		lasttx = nexttx;
7184 		nexttx = WM_NEXTTX(txq, nexttx);
7185 		/*
7186 		 * fill in the next descriptors. legacy or adcanced format
7187 		 * is the same here
7188 		 */
7189 		for (seg = 1; seg < dmamap->dm_nsegs;
7190 		    seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7191 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7192 			    htole64(dmamap->dm_segs[seg].ds_addr);
7193 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7194 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7195 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7196 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7197 			lasttx = nexttx;
7198 
7199 			DPRINTF(WM_DEBUG_TX,
7200 			    ("%s: TX: desc %d: %#" PRIx64 ", "
7201 			     "len %#04zx\n",
7202 			    device_xname(sc->sc_dev), nexttx,
7203 			    (uint64_t)dmamap->dm_segs[seg].ds_addr,
7204 			    dmamap->dm_segs[seg].ds_len));
7205 		}
7206 
7207 		KASSERT(lasttx != -1);
7208 
7209 		/*
7210 		 * Set up the command byte on the last descriptor of
7211 		 * the packet.  If we're in the interrupt delay window,
7212 		 * delay the interrupt.
7213 		 */
7214 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7215 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7216 		txq->txq_descs[lasttx].wtx_cmdlen |=
7217 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7218 
7219 		txs->txs_lastdesc = lasttx;
7220 
7221 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7222 		    device_xname(sc->sc_dev),
7223 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7224 
7225 		/* Sync the descriptors we're using. */
7226 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7227 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7228 
7229 		/* Give the packet to the chip. */
7230 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7231 		sent = true;
7232 
7233 		DPRINTF(WM_DEBUG_TX,
7234 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7235 
7236 		DPRINTF(WM_DEBUG_TX,
7237 		    ("%s: TX: finished transmitting packet, job %d\n",
7238 		    device_xname(sc->sc_dev), txq->txq_snext));
7239 
7240 		/* Advance the tx pointer. */
7241 		txq->txq_free -= txs->txs_ndesc;
7242 		txq->txq_next = nexttx;
7243 
7244 		txq->txq_sfree--;
7245 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7246 
7247 		/* Pass the packet to any BPF listeners. */
7248 		bpf_mtap(ifp, m0);
7249 	}
7250 
7251 	if (m0 != NULL) {
7252 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7253 		WM_Q_EVCNT_INCR(txq, txdrop);
7254 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7255 			__func__));
7256 		m_freem(m0);
7257 	}
7258 
7259 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7260 		/* No more slots; notify upper layer. */
7261 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7262 	}
7263 
7264 	if (sent) {
7265 		/* Set a watchdog timer in case the chip flakes out. */
7266 		ifp->if_timer = 5;
7267 	}
7268 }
7269 
7270 static void
7271 wm_deferred_start(struct ifnet *ifp)
7272 {
7273 	struct wm_softc *sc = ifp->if_softc;
7274 	int qid = 0;
7275 
7276 	/*
7277 	 * Try to transmit on all Tx queues. Passing a txq somehow and
7278 	 * transmitting only on the txq may be better.
7279 	 */
7280 restart:
7281 	WM_CORE_LOCK(sc);
7282 	if (sc->sc_core_stopping)
7283 		goto out;
7284 
7285 	for (; qid < sc->sc_nqueues; qid++) {
7286 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
7287 
7288 		if (!mutex_tryenter(txq->txq_lock))
7289 			continue;
7290 
7291 		if (txq->txq_stopping) {
7292 			mutex_exit(txq->txq_lock);
7293 			continue;
7294 		}
7295 		WM_CORE_UNLOCK(sc);
7296 
7297 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
7298 			/* XXX need for ALTQ */
7299 			if (qid == 0)
7300 				wm_nq_start_locked(ifp);
7301 			wm_nq_transmit_locked(ifp, txq);
7302 		} else {
7303 			/* XXX need for ALTQ */
7304 			if (qid == 0)
7305 				wm_start_locked(ifp);
7306 			wm_transmit_locked(ifp, txq);
7307 		}
7308 		mutex_exit(txq->txq_lock);
7309 
7310 		qid++;
7311 		goto restart;
7312 	}
7313 out:
7314 	WM_CORE_UNLOCK(sc);
7315 }
7316 
7317 /* Interrupt */
7318 
7319 /*
7320  * wm_txeof:
7321  *
7322  *	Helper; handle transmit interrupts.
7323  */
7324 static int
7325 wm_txeof(struct wm_softc *sc, struct wm_txqueue *txq)
7326 {
7327 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7328 	struct wm_txsoft *txs;
7329 	bool processed = false;
7330 	int count = 0;
7331 	int i;
7332 	uint8_t status;
7333 
7334 	KASSERT(mutex_owned(txq->txq_lock));
7335 
7336 	if (txq->txq_stopping)
7337 		return 0;
7338 
7339 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
7340 		txq->txq_flags &= ~WM_TXQ_NO_SPACE;
7341 	else
7342 		ifp->if_flags &= ~IFF_OACTIVE;
7343 
7344 	/*
7345 	 * Go through the Tx list and free mbufs for those
7346 	 * frames which have been transmitted.
7347 	 */
7348 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
7349 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
7350 		txs = &txq->txq_soft[i];
7351 
7352 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
7353 			device_xname(sc->sc_dev), i));
7354 
7355 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
7356 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
7357 
7358 		status =
7359 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
7360 		if ((status & WTX_ST_DD) == 0) {
7361 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
7362 			    BUS_DMASYNC_PREREAD);
7363 			break;
7364 		}
7365 
7366 		processed = true;
7367 		count++;
7368 		DPRINTF(WM_DEBUG_TX,
7369 		    ("%s: TX: job %d done: descs %d..%d\n",
7370 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
7371 		    txs->txs_lastdesc));
7372 
7373 		/*
7374 		 * XXX We should probably be using the statistics
7375 		 * XXX registers, but I don't know if they exist
7376 		 * XXX on chips before the i82544.
7377 		 */
7378 
7379 #ifdef WM_EVENT_COUNTERS
7380 		if (status & WTX_ST_TU)
7381 			WM_Q_EVCNT_INCR(txq, tu);
7382 #endif /* WM_EVENT_COUNTERS */
7383 
7384 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
7385 			ifp->if_oerrors++;
7386 			if (status & WTX_ST_LC)
7387 				log(LOG_WARNING, "%s: late collision\n",
7388 				    device_xname(sc->sc_dev));
7389 			else if (status & WTX_ST_EC) {
7390 				ifp->if_collisions += 16;
7391 				log(LOG_WARNING, "%s: excessive collisions\n",
7392 				    device_xname(sc->sc_dev));
7393 			}
7394 		} else
7395 			ifp->if_opackets++;
7396 
7397 		txq->txq_free += txs->txs_ndesc;
7398 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
7399 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
7400 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
7401 		m_freem(txs->txs_mbuf);
7402 		txs->txs_mbuf = NULL;
7403 	}
7404 
7405 	/* Update the dirty transmit buffer pointer. */
7406 	txq->txq_sdirty = i;
7407 	DPRINTF(WM_DEBUG_TX,
7408 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
7409 
7410 	if (count != 0)
7411 		rnd_add_uint32(&sc->rnd_source, count);
7412 
7413 	/*
7414 	 * If there are no more pending transmissions, cancel the watchdog
7415 	 * timer.
7416 	 */
7417 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
7418 		ifp->if_timer = 0;
7419 
7420 	return processed;
7421 }
7422 
7423 /*
7424  * wm_rxeof:
7425  *
7426  *	Helper; handle receive interrupts.
7427  */
7428 static void
7429 wm_rxeof(struct wm_rxqueue *rxq)
7430 {
7431 	struct wm_softc *sc = rxq->rxq_sc;
7432 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7433 	struct wm_rxsoft *rxs;
7434 	struct mbuf *m;
7435 	int i, len;
7436 	int count = 0;
7437 	uint8_t status, errors;
7438 	uint16_t vlantag;
7439 
7440 	KASSERT(mutex_owned(rxq->rxq_lock));
7441 
7442 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
7443 		rxs = &rxq->rxq_soft[i];
7444 
7445 		DPRINTF(WM_DEBUG_RX,
7446 		    ("%s: RX: checking descriptor %d\n",
7447 		    device_xname(sc->sc_dev), i));
7448 
7449 		wm_cdrxsync(rxq, i,BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
7450 
7451 		status = rxq->rxq_descs[i].wrx_status;
7452 		errors = rxq->rxq_descs[i].wrx_errors;
7453 		len = le16toh(rxq->rxq_descs[i].wrx_len);
7454 		vlantag = rxq->rxq_descs[i].wrx_special;
7455 
7456 		if ((status & WRX_ST_DD) == 0) {
7457 			/* We have processed all of the receive descriptors. */
7458 			wm_cdrxsync(rxq, i, BUS_DMASYNC_PREREAD);
7459 			break;
7460 		}
7461 
7462 		count++;
7463 		if (__predict_false(rxq->rxq_discard)) {
7464 			DPRINTF(WM_DEBUG_RX,
7465 			    ("%s: RX: discarding contents of descriptor %d\n",
7466 			    device_xname(sc->sc_dev), i));
7467 			wm_init_rxdesc(rxq, i);
7468 			if (status & WRX_ST_EOP) {
7469 				/* Reset our state. */
7470 				DPRINTF(WM_DEBUG_RX,
7471 				    ("%s: RX: resetting rxdiscard -> 0\n",
7472 				    device_xname(sc->sc_dev)));
7473 				rxq->rxq_discard = 0;
7474 			}
7475 			continue;
7476 		}
7477 
7478 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7479 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
7480 
7481 		m = rxs->rxs_mbuf;
7482 
7483 		/*
7484 		 * Add a new receive buffer to the ring, unless of
7485 		 * course the length is zero. Treat the latter as a
7486 		 * failed mapping.
7487 		 */
7488 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
7489 			/*
7490 			 * Failed, throw away what we've done so
7491 			 * far, and discard the rest of the packet.
7492 			 */
7493 			ifp->if_ierrors++;
7494 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
7495 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
7496 			wm_init_rxdesc(rxq, i);
7497 			if ((status & WRX_ST_EOP) == 0)
7498 				rxq->rxq_discard = 1;
7499 			if (rxq->rxq_head != NULL)
7500 				m_freem(rxq->rxq_head);
7501 			WM_RXCHAIN_RESET(rxq);
7502 			DPRINTF(WM_DEBUG_RX,
7503 			    ("%s: RX: Rx buffer allocation failed, "
7504 			    "dropping packet%s\n", device_xname(sc->sc_dev),
7505 			    rxq->rxq_discard ? " (discard)" : ""));
7506 			continue;
7507 		}
7508 
7509 		m->m_len = len;
7510 		rxq->rxq_len += len;
7511 		DPRINTF(WM_DEBUG_RX,
7512 		    ("%s: RX: buffer at %p len %d\n",
7513 		    device_xname(sc->sc_dev), m->m_data, len));
7514 
7515 		/* If this is not the end of the packet, keep looking. */
7516 		if ((status & WRX_ST_EOP) == 0) {
7517 			WM_RXCHAIN_LINK(rxq, m);
7518 			DPRINTF(WM_DEBUG_RX,
7519 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
7520 			    device_xname(sc->sc_dev), rxq->rxq_len));
7521 			continue;
7522 		}
7523 
7524 		/*
7525 		 * Okay, we have the entire packet now.  The chip is
7526 		 * configured to include the FCS except I350 and I21[01]
7527 		 * (not all chips can be configured to strip it),
7528 		 * so we need to trim it.
7529 		 * May need to adjust length of previous mbuf in the
7530 		 * chain if the current mbuf is too short.
7531 		 * For an eratta, the RCTL_SECRC bit in RCTL register
7532 		 * is always set in I350, so we don't trim it.
7533 		 */
7534 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
7535 		    && (sc->sc_type != WM_T_I210)
7536 		    && (sc->sc_type != WM_T_I211)) {
7537 			if (m->m_len < ETHER_CRC_LEN) {
7538 				rxq->rxq_tail->m_len
7539 				    -= (ETHER_CRC_LEN - m->m_len);
7540 				m->m_len = 0;
7541 			} else
7542 				m->m_len -= ETHER_CRC_LEN;
7543 			len = rxq->rxq_len - ETHER_CRC_LEN;
7544 		} else
7545 			len = rxq->rxq_len;
7546 
7547 		WM_RXCHAIN_LINK(rxq, m);
7548 
7549 		*rxq->rxq_tailp = NULL;
7550 		m = rxq->rxq_head;
7551 
7552 		WM_RXCHAIN_RESET(rxq);
7553 
7554 		DPRINTF(WM_DEBUG_RX,
7555 		    ("%s: RX: have entire packet, len -> %d\n",
7556 		    device_xname(sc->sc_dev), len));
7557 
7558 		/* If an error occurred, update stats and drop the packet. */
7559 		if (errors &
7560 		     (WRX_ER_CE|WRX_ER_SE|WRX_ER_SEQ|WRX_ER_CXE|WRX_ER_RXE)) {
7561 			if (errors & WRX_ER_SE)
7562 				log(LOG_WARNING, "%s: symbol error\n",
7563 				    device_xname(sc->sc_dev));
7564 			else if (errors & WRX_ER_SEQ)
7565 				log(LOG_WARNING, "%s: receive sequence error\n",
7566 				    device_xname(sc->sc_dev));
7567 			else if (errors & WRX_ER_CE)
7568 				log(LOG_WARNING, "%s: CRC error\n",
7569 				    device_xname(sc->sc_dev));
7570 			m_freem(m);
7571 			continue;
7572 		}
7573 
7574 		/* No errors.  Receive the packet. */
7575 		m_set_rcvif(m, ifp);
7576 		m->m_pkthdr.len = len;
7577 
7578 		/*
7579 		 * If VLANs are enabled, VLAN packets have been unwrapped
7580 		 * for us.  Associate the tag with the packet.
7581 		 */
7582 		/* XXXX should check for i350 and i354 */
7583 		if ((status & WRX_ST_VP) != 0) {
7584 			VLAN_INPUT_TAG(ifp, m, le16toh(vlantag), continue);
7585 		}
7586 
7587 		/* Set up checksum info for this packet. */
7588 		if ((status & WRX_ST_IXSM) == 0) {
7589 			if (status & WRX_ST_IPCS) {
7590 				WM_Q_EVCNT_INCR(rxq, rxipsum);
7591 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
7592 				if (errors & WRX_ER_IPE)
7593 					m->m_pkthdr.csum_flags |=
7594 					    M_CSUM_IPv4_BAD;
7595 			}
7596 			if (status & WRX_ST_TCPCS) {
7597 				/*
7598 				 * Note: we don't know if this was TCP or UDP,
7599 				 * so we just set both bits, and expect the
7600 				 * upper layers to deal.
7601 				 */
7602 				WM_Q_EVCNT_INCR(rxq, rxtusum);
7603 				m->m_pkthdr.csum_flags |=
7604 				    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7605 				    M_CSUM_TCPv6 | M_CSUM_UDPv6;
7606 				if (errors & WRX_ER_TCPE)
7607 					m->m_pkthdr.csum_flags |=
7608 					    M_CSUM_TCP_UDP_BAD;
7609 			}
7610 		}
7611 
7612 		mutex_exit(rxq->rxq_lock);
7613 
7614 		/* Pass it on. */
7615 		if_percpuq_enqueue(sc->sc_ipq, m);
7616 
7617 		mutex_enter(rxq->rxq_lock);
7618 
7619 		if (rxq->rxq_stopping)
7620 			break;
7621 	}
7622 
7623 	/* Update the receive pointer. */
7624 	rxq->rxq_ptr = i;
7625 	if (count != 0)
7626 		rnd_add_uint32(&sc->rnd_source, count);
7627 
7628 	DPRINTF(WM_DEBUG_RX,
7629 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
7630 }
7631 
7632 /*
7633  * wm_linkintr_gmii:
7634  *
7635  *	Helper; handle link interrupts for GMII.
7636  */
7637 static void
7638 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
7639 {
7640 
7641 	KASSERT(WM_CORE_LOCKED(sc));
7642 
7643 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7644 		__func__));
7645 
7646 	if (icr & ICR_LSC) {
7647 		uint32_t reg;
7648 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
7649 
7650 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
7651 			wm_gig_downshift_workaround_ich8lan(sc);
7652 
7653 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
7654 			device_xname(sc->sc_dev)));
7655 		mii_pollstat(&sc->sc_mii);
7656 		if (sc->sc_type == WM_T_82543) {
7657 			int miistatus, active;
7658 
7659 			/*
7660 			 * With 82543, we need to force speed and
7661 			 * duplex on the MAC equal to what the PHY
7662 			 * speed and duplex configuration is.
7663 			 */
7664 			miistatus = sc->sc_mii.mii_media_status;
7665 
7666 			if (miistatus & IFM_ACTIVE) {
7667 				active = sc->sc_mii.mii_media_active;
7668 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
7669 				switch (IFM_SUBTYPE(active)) {
7670 				case IFM_10_T:
7671 					sc->sc_ctrl |= CTRL_SPEED_10;
7672 					break;
7673 				case IFM_100_TX:
7674 					sc->sc_ctrl |= CTRL_SPEED_100;
7675 					break;
7676 				case IFM_1000_T:
7677 					sc->sc_ctrl |= CTRL_SPEED_1000;
7678 					break;
7679 				default:
7680 					/*
7681 					 * fiber?
7682 					 * Shoud not enter here.
7683 					 */
7684 					printf("unknown media (%x)\n", active);
7685 					break;
7686 				}
7687 				if (active & IFM_FDX)
7688 					sc->sc_ctrl |= CTRL_FD;
7689 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
7690 			}
7691 		} else if ((sc->sc_type == WM_T_ICH8)
7692 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
7693 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
7694 		} else if (sc->sc_type == WM_T_PCH) {
7695 			wm_k1_gig_workaround_hv(sc,
7696 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
7697 		}
7698 
7699 		if ((sc->sc_phytype == WMPHY_82578)
7700 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
7701 			== IFM_1000_T)) {
7702 
7703 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
7704 				delay(200*1000); /* XXX too big */
7705 
7706 				/* Link stall fix for link up */
7707 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7708 				    HV_MUX_DATA_CTRL,
7709 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
7710 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
7711 				wm_gmii_hv_writereg(sc->sc_dev, 1,
7712 				    HV_MUX_DATA_CTRL,
7713 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
7714 			}
7715 		}
7716 		/*
7717 		 * I217 Packet Loss issue:
7718 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
7719 		 * on power up.
7720 		 * Set the Beacon Duration for I217 to 8 usec
7721 		 */
7722 		if ((sc->sc_type == WM_T_PCH_LPT)
7723 		    || (sc->sc_type == WM_T_PCH_SPT)) {
7724 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
7725 			reg &= ~FEXTNVM4_BEACON_DURATION;
7726 			reg |= FEXTNVM4_BEACON_DURATION_8US;
7727 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
7728 		}
7729 
7730 		/* XXX Work-around I218 hang issue */
7731 		/* e1000_k1_workaround_lpt_lp() */
7732 
7733 		if ((sc->sc_type == WM_T_PCH_LPT)
7734 		    || (sc->sc_type == WM_T_PCH_SPT)) {
7735 			/*
7736 			 * Set platform power management values for Latency
7737 			 * Tolerance Reporting (LTR)
7738 			 */
7739 			wm_platform_pm_pch_lpt(sc,
7740 				((sc->sc_mii.mii_media_status & IFM_ACTIVE)
7741 				    != 0));
7742 		}
7743 
7744 		/* FEXTNVM6 K1-off workaround */
7745 		if (sc->sc_type == WM_T_PCH_SPT) {
7746 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
7747 			if (CSR_READ(sc, WMREG_PCIEANACFG)
7748 			    & FEXTNVM6_K1_OFF_ENABLE)
7749 				reg |= FEXTNVM6_K1_OFF_ENABLE;
7750 			else
7751 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
7752 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
7753 		}
7754 	} else if (icr & ICR_RXSEQ) {
7755 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK Receive sequence error\n",
7756 			device_xname(sc->sc_dev)));
7757 	}
7758 }
7759 
7760 /*
7761  * wm_linkintr_tbi:
7762  *
7763  *	Helper; handle link interrupts for TBI mode.
7764  */
7765 static void
7766 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
7767 {
7768 	uint32_t status;
7769 
7770 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7771 		__func__));
7772 
7773 	status = CSR_READ(sc, WMREG_STATUS);
7774 	if (icr & ICR_LSC) {
7775 		if (status & STATUS_LU) {
7776 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
7777 			    device_xname(sc->sc_dev),
7778 			    (status & STATUS_FD) ? "FDX" : "HDX"));
7779 			/*
7780 			 * NOTE: CTRL will update TFCE and RFCE automatically,
7781 			 * so we should update sc->sc_ctrl
7782 			 */
7783 
7784 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
7785 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
7786 			sc->sc_fcrtl &= ~FCRTL_XONE;
7787 			if (status & STATUS_FD)
7788 				sc->sc_tctl |=
7789 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
7790 			else
7791 				sc->sc_tctl |=
7792 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
7793 			if (sc->sc_ctrl & CTRL_TFCE)
7794 				sc->sc_fcrtl |= FCRTL_XONE;
7795 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
7796 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
7797 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
7798 				      sc->sc_fcrtl);
7799 			sc->sc_tbi_linkup = 1;
7800 		} else {
7801 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
7802 			    device_xname(sc->sc_dev)));
7803 			sc->sc_tbi_linkup = 0;
7804 		}
7805 		/* Update LED */
7806 		wm_tbi_serdes_set_linkled(sc);
7807 	} else if (icr & ICR_RXSEQ) {
7808 		DPRINTF(WM_DEBUG_LINK,
7809 		    ("%s: LINK: Receive sequence error\n",
7810 		    device_xname(sc->sc_dev)));
7811 	}
7812 }
7813 
7814 /*
7815  * wm_linkintr_serdes:
7816  *
7817  *	Helper; handle link interrupts for TBI mode.
7818  */
7819 static void
7820 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
7821 {
7822 	struct mii_data *mii = &sc->sc_mii;
7823 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
7824 	uint32_t pcs_adv, pcs_lpab, reg;
7825 
7826 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
7827 		__func__));
7828 
7829 	if (icr & ICR_LSC) {
7830 		/* Check PCS */
7831 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
7832 		if ((reg & PCS_LSTS_LINKOK) != 0) {
7833 			mii->mii_media_status |= IFM_ACTIVE;
7834 			sc->sc_tbi_linkup = 1;
7835 		} else {
7836 			mii->mii_media_status |= IFM_NONE;
7837 			sc->sc_tbi_linkup = 0;
7838 			wm_tbi_serdes_set_linkled(sc);
7839 			return;
7840 		}
7841 		mii->mii_media_active |= IFM_1000_SX;
7842 		if ((reg & PCS_LSTS_FDX) != 0)
7843 			mii->mii_media_active |= IFM_FDX;
7844 		else
7845 			mii->mii_media_active |= IFM_HDX;
7846 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
7847 			/* Check flow */
7848 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
7849 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
7850 				DPRINTF(WM_DEBUG_LINK,
7851 				    ("XXX LINKOK but not ACOMP\n"));
7852 				return;
7853 			}
7854 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
7855 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
7856 			DPRINTF(WM_DEBUG_LINK,
7857 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
7858 			if ((pcs_adv & TXCW_SYM_PAUSE)
7859 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
7860 				mii->mii_media_active |= IFM_FLOW
7861 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
7862 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
7863 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7864 			    && (pcs_lpab & TXCW_SYM_PAUSE)
7865 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7866 				mii->mii_media_active |= IFM_FLOW
7867 				    | IFM_ETH_TXPAUSE;
7868 			else if ((pcs_adv & TXCW_SYM_PAUSE)
7869 			    && (pcs_adv & TXCW_ASYM_PAUSE)
7870 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
7871 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
7872 				mii->mii_media_active |= IFM_FLOW
7873 				    | IFM_ETH_RXPAUSE;
7874 		}
7875 		/* Update LED */
7876 		wm_tbi_serdes_set_linkled(sc);
7877 	} else {
7878 		DPRINTF(WM_DEBUG_LINK,
7879 		    ("%s: LINK: Receive sequence error\n",
7880 		    device_xname(sc->sc_dev)));
7881 	}
7882 }
7883 
7884 /*
7885  * wm_linkintr:
7886  *
7887  *	Helper; handle link interrupts.
7888  */
7889 static void
7890 wm_linkintr(struct wm_softc *sc, uint32_t icr)
7891 {
7892 
7893 	KASSERT(WM_CORE_LOCKED(sc));
7894 
7895 	if (sc->sc_flags & WM_F_HAS_MII)
7896 		wm_linkintr_gmii(sc, icr);
7897 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
7898 	    && (sc->sc_type >= WM_T_82575))
7899 		wm_linkintr_serdes(sc, icr);
7900 	else
7901 		wm_linkintr_tbi(sc, icr);
7902 }
7903 
7904 /*
7905  * wm_intr_legacy:
7906  *
7907  *	Interrupt service routine for INTx and MSI.
7908  */
7909 static int
7910 wm_intr_legacy(void *arg)
7911 {
7912 	struct wm_softc *sc = arg;
7913 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7914 	struct wm_rxqueue *rxq = &sc->sc_queue[0].wmq_rxq;
7915 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
7916 	uint32_t icr, rndval = 0;
7917 	int handled = 0;
7918 
7919 	DPRINTF(WM_DEBUG_TX,
7920 	    ("%s: INTx: got intr\n", device_xname(sc->sc_dev)));
7921 	while (1 /* CONSTCOND */) {
7922 		icr = CSR_READ(sc, WMREG_ICR);
7923 		if ((icr & sc->sc_icr) == 0)
7924 			break;
7925 		if (rndval == 0)
7926 			rndval = icr;
7927 
7928 		mutex_enter(rxq->rxq_lock);
7929 
7930 		if (rxq->rxq_stopping) {
7931 			mutex_exit(rxq->rxq_lock);
7932 			break;
7933 		}
7934 
7935 		handled = 1;
7936 
7937 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7938 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
7939 			DPRINTF(WM_DEBUG_RX,
7940 			    ("%s: RX: got Rx intr 0x%08x\n",
7941 			    device_xname(sc->sc_dev),
7942 			    icr & (ICR_RXDMT0 | ICR_RXT0)));
7943 			WM_Q_EVCNT_INCR(rxq, rxintr);
7944 		}
7945 #endif
7946 		wm_rxeof(rxq);
7947 
7948 		mutex_exit(rxq->rxq_lock);
7949 		mutex_enter(txq->txq_lock);
7950 
7951 		if (txq->txq_stopping) {
7952 			mutex_exit(txq->txq_lock);
7953 			break;
7954 		}
7955 
7956 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
7957 		if (icr & ICR_TXDW) {
7958 			DPRINTF(WM_DEBUG_TX,
7959 			    ("%s: TX: got TXDW interrupt\n",
7960 			    device_xname(sc->sc_dev)));
7961 			WM_Q_EVCNT_INCR(txq, txdw);
7962 		}
7963 #endif
7964 		wm_txeof(sc, txq);
7965 
7966 		mutex_exit(txq->txq_lock);
7967 		WM_CORE_LOCK(sc);
7968 
7969 		if (sc->sc_core_stopping) {
7970 			WM_CORE_UNLOCK(sc);
7971 			break;
7972 		}
7973 
7974 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
7975 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
7976 			wm_linkintr(sc, icr);
7977 		}
7978 
7979 		WM_CORE_UNLOCK(sc);
7980 
7981 		if (icr & ICR_RXO) {
7982 #if defined(WM_DEBUG)
7983 			log(LOG_WARNING, "%s: Receive overrun\n",
7984 			    device_xname(sc->sc_dev));
7985 #endif /* defined(WM_DEBUG) */
7986 		}
7987 	}
7988 
7989 	rnd_add_uint32(&sc->rnd_source, rndval);
7990 
7991 	if (handled) {
7992 		/* Try to get more packets going. */
7993 		if_schedule_deferred_start(ifp);
7994 	}
7995 
7996 	return handled;
7997 }
7998 
7999 static int
8000 wm_txrxintr_msix(void *arg)
8001 {
8002 	struct wm_queue *wmq = arg;
8003 	struct wm_txqueue *txq = &wmq->wmq_txq;
8004 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8005 	struct wm_softc *sc = txq->txq_sc;
8006 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8007 
8008 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
8009 
8010 	DPRINTF(WM_DEBUG_TX,
8011 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
8012 
8013 	if (sc->sc_type == WM_T_82574)
8014 		CSR_WRITE(sc, WMREG_IMC, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8015 	else if (sc->sc_type == WM_T_82575)
8016 		CSR_WRITE(sc, WMREG_EIMC, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8017 	else
8018 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
8019 
8020 	mutex_enter(txq->txq_lock);
8021 
8022 	if (txq->txq_stopping) {
8023 		mutex_exit(txq->txq_lock);
8024 		return 0;
8025 	}
8026 
8027 	WM_Q_EVCNT_INCR(txq, txdw);
8028 	wm_txeof(sc, txq);
8029 
8030 	/* Try to get more packets going. */
8031 	if (pcq_peek(txq->txq_interq) != NULL)
8032 		if_schedule_deferred_start(ifp);
8033 	/*
8034 	 * There are still some upper layer processing which call
8035 	 * ifp->if_start(). e.g. ALTQ
8036 	 */
8037 	if (wmq->wmq_id == 0)
8038 		if_schedule_deferred_start(ifp);
8039 
8040 	mutex_exit(txq->txq_lock);
8041 
8042 	DPRINTF(WM_DEBUG_RX,
8043 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
8044 	mutex_enter(rxq->rxq_lock);
8045 
8046 	if (rxq->rxq_stopping) {
8047 		mutex_exit(rxq->rxq_lock);
8048 		return 0;
8049 	}
8050 
8051 	WM_Q_EVCNT_INCR(rxq, rxintr);
8052 	wm_rxeof(rxq);
8053 	mutex_exit(rxq->rxq_lock);
8054 
8055 	if (sc->sc_type == WM_T_82574)
8056 		CSR_WRITE(sc, WMREG_IMS, ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
8057 	else if (sc->sc_type == WM_T_82575)
8058 		CSR_WRITE(sc, WMREG_EIMS, EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
8059 	else
8060 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
8061 
8062 	return 1;
8063 }
8064 
8065 /*
8066  * wm_linkintr_msix:
8067  *
8068  *	Interrupt service routine for link status change for MSI-X.
8069  */
8070 static int
8071 wm_linkintr_msix(void *arg)
8072 {
8073 	struct wm_softc *sc = arg;
8074 	uint32_t reg;
8075 
8076 	DPRINTF(WM_DEBUG_LINK,
8077 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
8078 
8079 	reg = CSR_READ(sc, WMREG_ICR);
8080 	WM_CORE_LOCK(sc);
8081 	if ((sc->sc_core_stopping) || ((reg & ICR_LSC) == 0))
8082 		goto out;
8083 
8084 	WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8085 	wm_linkintr(sc, ICR_LSC);
8086 
8087 out:
8088 	WM_CORE_UNLOCK(sc);
8089 
8090 	if (sc->sc_type == WM_T_82574)
8091 		CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
8092 	else if (sc->sc_type == WM_T_82575)
8093 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
8094 	else
8095 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
8096 
8097 	return 1;
8098 }
8099 
8100 /*
8101  * Media related.
8102  * GMII, SGMII, TBI (and SERDES)
8103  */
8104 
8105 /* Common */
8106 
8107 /*
8108  * wm_tbi_serdes_set_linkled:
8109  *
8110  *	Update the link LED on TBI and SERDES devices.
8111  */
8112 static void
8113 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
8114 {
8115 
8116 	if (sc->sc_tbi_linkup)
8117 		sc->sc_ctrl |= CTRL_SWDPIN(0);
8118 	else
8119 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
8120 
8121 	/* 82540 or newer devices are active low */
8122 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
8123 
8124 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8125 }
8126 
8127 /* GMII related */
8128 
8129 /*
8130  * wm_gmii_reset:
8131  *
8132  *	Reset the PHY.
8133  */
8134 static void
8135 wm_gmii_reset(struct wm_softc *sc)
8136 {
8137 	uint32_t reg;
8138 	int rv;
8139 
8140 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
8141 		device_xname(sc->sc_dev), __func__));
8142 
8143 	rv = sc->phy.acquire(sc);
8144 	if (rv != 0) {
8145 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8146 		    __func__);
8147 		return;
8148 	}
8149 
8150 	switch (sc->sc_type) {
8151 	case WM_T_82542_2_0:
8152 	case WM_T_82542_2_1:
8153 		/* null */
8154 		break;
8155 	case WM_T_82543:
8156 		/*
8157 		 * With 82543, we need to force speed and duplex on the MAC
8158 		 * equal to what the PHY speed and duplex configuration is.
8159 		 * In addition, we need to perform a hardware reset on the PHY
8160 		 * to take it out of reset.
8161 		 */
8162 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8163 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8164 
8165 		/* The PHY reset pin is active-low. */
8166 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
8167 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
8168 		    CTRL_EXT_SWDPIN(4));
8169 		reg |= CTRL_EXT_SWDPIO(4);
8170 
8171 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
8172 		CSR_WRITE_FLUSH(sc);
8173 		delay(10*1000);
8174 
8175 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
8176 		CSR_WRITE_FLUSH(sc);
8177 		delay(150);
8178 #if 0
8179 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
8180 #endif
8181 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
8182 		break;
8183 	case WM_T_82544:	/* reset 10000us */
8184 	case WM_T_82540:
8185 	case WM_T_82545:
8186 	case WM_T_82545_3:
8187 	case WM_T_82546:
8188 	case WM_T_82546_3:
8189 	case WM_T_82541:
8190 	case WM_T_82541_2:
8191 	case WM_T_82547:
8192 	case WM_T_82547_2:
8193 	case WM_T_82571:	/* reset 100us */
8194 	case WM_T_82572:
8195 	case WM_T_82573:
8196 	case WM_T_82574:
8197 	case WM_T_82575:
8198 	case WM_T_82576:
8199 	case WM_T_82580:
8200 	case WM_T_I350:
8201 	case WM_T_I354:
8202 	case WM_T_I210:
8203 	case WM_T_I211:
8204 	case WM_T_82583:
8205 	case WM_T_80003:
8206 		/* generic reset */
8207 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8208 		CSR_WRITE_FLUSH(sc);
8209 		delay(20000);
8210 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8211 		CSR_WRITE_FLUSH(sc);
8212 		delay(20000);
8213 
8214 		if ((sc->sc_type == WM_T_82541)
8215 		    || (sc->sc_type == WM_T_82541_2)
8216 		    || (sc->sc_type == WM_T_82547)
8217 		    || (sc->sc_type == WM_T_82547_2)) {
8218 			/* workaround for igp are done in igp_reset() */
8219 			/* XXX add code to set LED after phy reset */
8220 		}
8221 		break;
8222 	case WM_T_ICH8:
8223 	case WM_T_ICH9:
8224 	case WM_T_ICH10:
8225 	case WM_T_PCH:
8226 	case WM_T_PCH2:
8227 	case WM_T_PCH_LPT:
8228 	case WM_T_PCH_SPT:
8229 		/* generic reset */
8230 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
8231 		CSR_WRITE_FLUSH(sc);
8232 		delay(100);
8233 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8234 		CSR_WRITE_FLUSH(sc);
8235 		delay(150);
8236 		break;
8237 	default:
8238 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
8239 		    __func__);
8240 		break;
8241 	}
8242 
8243 	sc->phy.release(sc);
8244 
8245 	/* get_cfg_done */
8246 	wm_get_cfg_done(sc);
8247 
8248 	/* extra setup */
8249 	switch (sc->sc_type) {
8250 	case WM_T_82542_2_0:
8251 	case WM_T_82542_2_1:
8252 	case WM_T_82543:
8253 	case WM_T_82544:
8254 	case WM_T_82540:
8255 	case WM_T_82545:
8256 	case WM_T_82545_3:
8257 	case WM_T_82546:
8258 	case WM_T_82546_3:
8259 	case WM_T_82541_2:
8260 	case WM_T_82547_2:
8261 	case WM_T_82571:
8262 	case WM_T_82572:
8263 	case WM_T_82573:
8264 	case WM_T_82575:
8265 	case WM_T_82576:
8266 	case WM_T_82580:
8267 	case WM_T_I350:
8268 	case WM_T_I354:
8269 	case WM_T_I210:
8270 	case WM_T_I211:
8271 	case WM_T_80003:
8272 		/* null */
8273 		break;
8274 	case WM_T_82574:
8275 	case WM_T_82583:
8276 		wm_lplu_d0_disable(sc);
8277 		break;
8278 	case WM_T_82541:
8279 	case WM_T_82547:
8280 		/* XXX Configure actively LED after PHY reset */
8281 		break;
8282 	case WM_T_ICH8:
8283 	case WM_T_ICH9:
8284 	case WM_T_ICH10:
8285 	case WM_T_PCH:
8286 	case WM_T_PCH2:
8287 	case WM_T_PCH_LPT:
8288 	case WM_T_PCH_SPT:
8289 		/* Allow time for h/w to get to a quiescent state afer reset */
8290 		delay(10*1000);
8291 
8292 		if (sc->sc_type == WM_T_PCH)
8293 			wm_hv_phy_workaround_ich8lan(sc);
8294 
8295 		if (sc->sc_type == WM_T_PCH2)
8296 			wm_lv_phy_workaround_ich8lan(sc);
8297 
8298 		/* Clear the host wakeup bit after lcd reset */
8299 		if (sc->sc_type >= WM_T_PCH) {
8300 			reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
8301 			    BM_PORT_GEN_CFG);
8302 			reg &= ~BM_WUC_HOST_WU_BIT;
8303 			wm_gmii_hv_writereg(sc->sc_dev, 2,
8304 			    BM_PORT_GEN_CFG, reg);
8305 		}
8306 
8307 		/*
8308 		 * XXX Configure the LCD with th extended configuration region
8309 		 * in NVM
8310 		 */
8311 
8312 		/* Disable D0 LPLU. */
8313 		if (sc->sc_type >= WM_T_PCH)	/* PCH* */
8314 			wm_lplu_d0_disable_pch(sc);
8315 		else
8316 			wm_lplu_d0_disable(sc);	/* ICH* */
8317 		break;
8318 	default:
8319 		panic("%s: unknown type\n", __func__);
8320 		break;
8321 	}
8322 }
8323 
8324 /*
8325  * wm_get_phy_id_82575:
8326  *
8327  * Return PHY ID. Return -1 if it failed.
8328  */
8329 static int
8330 wm_get_phy_id_82575(struct wm_softc *sc)
8331 {
8332 	uint32_t reg;
8333 	int phyid = -1;
8334 
8335 	/* XXX */
8336 	if ((sc->sc_flags & WM_F_SGMII) == 0)
8337 		return -1;
8338 
8339 	if (wm_sgmii_uses_mdio(sc)) {
8340 		switch (sc->sc_type) {
8341 		case WM_T_82575:
8342 		case WM_T_82576:
8343 			reg = CSR_READ(sc, WMREG_MDIC);
8344 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
8345 			break;
8346 		case WM_T_82580:
8347 		case WM_T_I350:
8348 		case WM_T_I354:
8349 		case WM_T_I210:
8350 		case WM_T_I211:
8351 			reg = CSR_READ(sc, WMREG_MDICNFG);
8352 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
8353 			break;
8354 		default:
8355 			return -1;
8356 		}
8357 	}
8358 
8359 	return phyid;
8360 }
8361 
8362 
8363 /*
8364  * wm_gmii_mediainit:
8365  *
8366  *	Initialize media for use on 1000BASE-T devices.
8367  */
8368 static void
8369 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
8370 {
8371 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8372 	struct mii_data *mii = &sc->sc_mii;
8373 	uint32_t reg;
8374 
8375 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8376 		device_xname(sc->sc_dev), __func__));
8377 
8378 	/* We have GMII. */
8379 	sc->sc_flags |= WM_F_HAS_MII;
8380 
8381 	if (sc->sc_type == WM_T_80003)
8382 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
8383 	else
8384 		sc->sc_tipg = TIPG_1000T_DFLT;
8385 
8386 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
8387 	if ((sc->sc_type == WM_T_82580)
8388 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
8389 	    || (sc->sc_type == WM_T_I211)) {
8390 		reg = CSR_READ(sc, WMREG_PHPM);
8391 		reg &= ~PHPM_GO_LINK_D;
8392 		CSR_WRITE(sc, WMREG_PHPM, reg);
8393 	}
8394 
8395 	/*
8396 	 * Let the chip set speed/duplex on its own based on
8397 	 * signals from the PHY.
8398 	 * XXXbouyer - I'm not sure this is right for the 80003,
8399 	 * the em driver only sets CTRL_SLU here - but it seems to work.
8400 	 */
8401 	sc->sc_ctrl |= CTRL_SLU;
8402 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8403 
8404 	/* Initialize our media structures and probe the GMII. */
8405 	mii->mii_ifp = ifp;
8406 
8407 	/*
8408 	 * Determine the PHY access method.
8409 	 *
8410 	 *  For SGMII, use SGMII specific method.
8411 	 *
8412 	 *  For some devices, we can determine the PHY access method
8413 	 * from sc_type.
8414 	 *
8415 	 *  For ICH and PCH variants, it's difficult to determine the PHY
8416 	 * access  method by sc_type, so use the PCI product ID for some
8417 	 * devices.
8418 	 * For other ICH8 variants, try to use igp's method. If the PHY
8419 	 * can't detect, then use bm's method.
8420 	 */
8421 	switch (prodid) {
8422 	case PCI_PRODUCT_INTEL_PCH_M_LM:
8423 	case PCI_PRODUCT_INTEL_PCH_M_LC:
8424 		/* 82577 */
8425 		sc->sc_phytype = WMPHY_82577;
8426 		break;
8427 	case PCI_PRODUCT_INTEL_PCH_D_DM:
8428 	case PCI_PRODUCT_INTEL_PCH_D_DC:
8429 		/* 82578 */
8430 		sc->sc_phytype = WMPHY_82578;
8431 		break;
8432 	case PCI_PRODUCT_INTEL_PCH2_LV_LM:
8433 	case PCI_PRODUCT_INTEL_PCH2_LV_V:
8434 		/* 82579 */
8435 		sc->sc_phytype = WMPHY_82579;
8436 		break;
8437 	case PCI_PRODUCT_INTEL_82801H_82567V_3:
8438 	case PCI_PRODUCT_INTEL_82801I_BM:
8439 	case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
8440 	case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
8441 	case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
8442 	case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
8443 	case PCI_PRODUCT_INTEL_82801J_R_BM_V:
8444 		/* ICH8, 9, 10 with 82567 */
8445 		sc->sc_phytype = WMPHY_BM;
8446 		mii->mii_readreg = wm_gmii_bm_readreg;
8447 		mii->mii_writereg = wm_gmii_bm_writereg;
8448 		break;
8449 	default:
8450 		if (((sc->sc_flags & WM_F_SGMII) != 0)
8451 		    && !wm_sgmii_uses_mdio(sc)){
8452 			/* SGMII */
8453 			mii->mii_readreg = wm_sgmii_readreg;
8454 			mii->mii_writereg = wm_sgmii_writereg;
8455 		} else if ((sc->sc_type == WM_T_82574)
8456 		    || (sc->sc_type == WM_T_82583)) {
8457 			/* BM2 (phyaddr == 1) */
8458 			sc->sc_phytype = WMPHY_BM;
8459 			mii->mii_readreg = wm_gmii_bm_readreg;
8460 			mii->mii_writereg = wm_gmii_bm_writereg;
8461 		} else if (sc->sc_type >= WM_T_ICH8) {
8462 			/* non-82567 ICH8, 9 and 10 */
8463 			mii->mii_readreg = wm_gmii_i82544_readreg;
8464 			mii->mii_writereg = wm_gmii_i82544_writereg;
8465 		} else if (sc->sc_type >= WM_T_80003) {
8466 			/* 80003 */
8467 			sc->sc_phytype = WMPHY_GG82563;
8468 			mii->mii_readreg = wm_gmii_i80003_readreg;
8469 			mii->mii_writereg = wm_gmii_i80003_writereg;
8470 		} else if (sc->sc_type >= WM_T_I210) {
8471 			/* I210 and I211 */
8472 			sc->sc_phytype = WMPHY_210;
8473 			mii->mii_readreg = wm_gmii_gs40g_readreg;
8474 			mii->mii_writereg = wm_gmii_gs40g_writereg;
8475 		} else if (sc->sc_type >= WM_T_82580) {
8476 			/* 82580, I350 and I354 */
8477 			sc->sc_phytype = WMPHY_82580;
8478 			mii->mii_readreg = wm_gmii_82580_readreg;
8479 			mii->mii_writereg = wm_gmii_82580_writereg;
8480 		} else if (sc->sc_type >= WM_T_82544) {
8481 			/* 82544, 0, [56], [17], 8257[1234] and 82583 */
8482 			mii->mii_readreg = wm_gmii_i82544_readreg;
8483 			mii->mii_writereg = wm_gmii_i82544_writereg;
8484 		} else {
8485 			mii->mii_readreg = wm_gmii_i82543_readreg;
8486 			mii->mii_writereg = wm_gmii_i82543_writereg;
8487 		}
8488 		break;
8489 	}
8490 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_SPT)) {
8491 		/* All PCH* use _hv_ */
8492 		mii->mii_readreg = wm_gmii_hv_readreg;
8493 		mii->mii_writereg = wm_gmii_hv_writereg;
8494 	}
8495 	mii->mii_statchg = wm_gmii_statchg;
8496 
8497 	/* get PHY control from SMBus to PCIe */
8498 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
8499 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT))
8500 		wm_smbustopci(sc);
8501 
8502 	wm_gmii_reset(sc);
8503 
8504 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
8505 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
8506 	    wm_gmii_mediastatus);
8507 
8508 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
8509 	    || (sc->sc_type == WM_T_82580)
8510 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
8511 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
8512 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
8513 			/* Attach only one port */
8514 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
8515 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
8516 		} else {
8517 			int i, id;
8518 			uint32_t ctrl_ext;
8519 
8520 			id = wm_get_phy_id_82575(sc);
8521 			if (id != -1) {
8522 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
8523 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
8524 			}
8525 			if ((id == -1)
8526 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
8527 				/* Power on sgmii phy if it is disabled */
8528 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
8529 				CSR_WRITE(sc, WMREG_CTRL_EXT,
8530 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
8531 				CSR_WRITE_FLUSH(sc);
8532 				delay(300*1000); /* XXX too long */
8533 
8534 				/* from 1 to 8 */
8535 				for (i = 1; i < 8; i++)
8536 					mii_attach(sc->sc_dev, &sc->sc_mii,
8537 					    0xffffffff, i, MII_OFFSET_ANY,
8538 					    MIIF_DOPAUSE);
8539 
8540 				/* restore previous sfp cage power state */
8541 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
8542 			}
8543 		}
8544 	} else {
8545 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8546 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8547 	}
8548 
8549 	/*
8550 	 * If the MAC is PCH2 or PCH_LPT and failed to detect MII PHY, call
8551 	 * wm_set_mdio_slow_mode_hv() for a workaround and retry.
8552 	 */
8553 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) &&
8554 	    (LIST_FIRST(&mii->mii_phys) == NULL)) {
8555 		wm_set_mdio_slow_mode_hv(sc);
8556 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8557 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8558 	}
8559 
8560 	/*
8561 	 * (For ICH8 variants)
8562 	 * If PHY detection failed, use BM's r/w function and retry.
8563 	 */
8564 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8565 		/* if failed, retry with *_bm_* */
8566 		mii->mii_readreg = wm_gmii_bm_readreg;
8567 		mii->mii_writereg = wm_gmii_bm_writereg;
8568 
8569 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
8570 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
8571 	}
8572 
8573 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
8574 		/* Any PHY wasn't find */
8575 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
8576 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
8577 		sc->sc_phytype = WMPHY_NONE;
8578 	} else {
8579 		/*
8580 		 * PHY Found!
8581 		 * Check PHY type.
8582 		 */
8583 		uint32_t model;
8584 		struct mii_softc *child;
8585 
8586 		child = LIST_FIRST(&mii->mii_phys);
8587 		model = child->mii_mpd_model;
8588 		if (model == MII_MODEL_yyINTEL_I82566)
8589 			sc->sc_phytype = WMPHY_IGP_3;
8590 
8591 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
8592 	}
8593 }
8594 
8595 /*
8596  * wm_gmii_mediachange:	[ifmedia interface function]
8597  *
8598  *	Set hardware to newly-selected media on a 1000BASE-T device.
8599  */
8600 static int
8601 wm_gmii_mediachange(struct ifnet *ifp)
8602 {
8603 	struct wm_softc *sc = ifp->if_softc;
8604 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8605 	int rc;
8606 
8607 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
8608 		device_xname(sc->sc_dev), __func__));
8609 	if ((ifp->if_flags & IFF_UP) == 0)
8610 		return 0;
8611 
8612 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8613 	sc->sc_ctrl |= CTRL_SLU;
8614 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
8615 	    || (sc->sc_type > WM_T_82543)) {
8616 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
8617 	} else {
8618 		sc->sc_ctrl &= ~CTRL_ASDE;
8619 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
8620 		if (ife->ifm_media & IFM_FDX)
8621 			sc->sc_ctrl |= CTRL_FD;
8622 		switch (IFM_SUBTYPE(ife->ifm_media)) {
8623 		case IFM_10_T:
8624 			sc->sc_ctrl |= CTRL_SPEED_10;
8625 			break;
8626 		case IFM_100_TX:
8627 			sc->sc_ctrl |= CTRL_SPEED_100;
8628 			break;
8629 		case IFM_1000_T:
8630 			sc->sc_ctrl |= CTRL_SPEED_1000;
8631 			break;
8632 		default:
8633 			panic("wm_gmii_mediachange: bad media 0x%x",
8634 			    ife->ifm_media);
8635 		}
8636 	}
8637 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8638 	if (sc->sc_type <= WM_T_82543)
8639 		wm_gmii_reset(sc);
8640 
8641 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
8642 		return 0;
8643 	return rc;
8644 }
8645 
8646 /*
8647  * wm_gmii_mediastatus:	[ifmedia interface function]
8648  *
8649  *	Get the current interface media status on a 1000BASE-T device.
8650  */
8651 static void
8652 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
8653 {
8654 	struct wm_softc *sc = ifp->if_softc;
8655 
8656 	ether_mediastatus(ifp, ifmr);
8657 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
8658 	    | sc->sc_flowflags;
8659 }
8660 
8661 #define	MDI_IO		CTRL_SWDPIN(2)
8662 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
8663 #define	MDI_CLK		CTRL_SWDPIN(3)
8664 
8665 static void
8666 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
8667 {
8668 	uint32_t i, v;
8669 
8670 	v = CSR_READ(sc, WMREG_CTRL);
8671 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8672 	v |= MDI_DIR | CTRL_SWDPIO(3);
8673 
8674 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
8675 		if (data & i)
8676 			v |= MDI_IO;
8677 		else
8678 			v &= ~MDI_IO;
8679 		CSR_WRITE(sc, WMREG_CTRL, v);
8680 		CSR_WRITE_FLUSH(sc);
8681 		delay(10);
8682 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8683 		CSR_WRITE_FLUSH(sc);
8684 		delay(10);
8685 		CSR_WRITE(sc, WMREG_CTRL, v);
8686 		CSR_WRITE_FLUSH(sc);
8687 		delay(10);
8688 	}
8689 }
8690 
8691 static uint32_t
8692 wm_i82543_mii_recvbits(struct wm_softc *sc)
8693 {
8694 	uint32_t v, i, data = 0;
8695 
8696 	v = CSR_READ(sc, WMREG_CTRL);
8697 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
8698 	v |= CTRL_SWDPIO(3);
8699 
8700 	CSR_WRITE(sc, WMREG_CTRL, v);
8701 	CSR_WRITE_FLUSH(sc);
8702 	delay(10);
8703 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8704 	CSR_WRITE_FLUSH(sc);
8705 	delay(10);
8706 	CSR_WRITE(sc, WMREG_CTRL, v);
8707 	CSR_WRITE_FLUSH(sc);
8708 	delay(10);
8709 
8710 	for (i = 0; i < 16; i++) {
8711 		data <<= 1;
8712 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8713 		CSR_WRITE_FLUSH(sc);
8714 		delay(10);
8715 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
8716 			data |= 1;
8717 		CSR_WRITE(sc, WMREG_CTRL, v);
8718 		CSR_WRITE_FLUSH(sc);
8719 		delay(10);
8720 	}
8721 
8722 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
8723 	CSR_WRITE_FLUSH(sc);
8724 	delay(10);
8725 	CSR_WRITE(sc, WMREG_CTRL, v);
8726 	CSR_WRITE_FLUSH(sc);
8727 	delay(10);
8728 
8729 	return data;
8730 }
8731 
8732 #undef MDI_IO
8733 #undef MDI_DIR
8734 #undef MDI_CLK
8735 
8736 /*
8737  * wm_gmii_i82543_readreg:	[mii interface function]
8738  *
8739  *	Read a PHY register on the GMII (i82543 version).
8740  */
8741 static int
8742 wm_gmii_i82543_readreg(device_t self, int phy, int reg)
8743 {
8744 	struct wm_softc *sc = device_private(self);
8745 	int rv;
8746 
8747 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8748 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
8749 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
8750 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
8751 
8752 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
8753 	    device_xname(sc->sc_dev), phy, reg, rv));
8754 
8755 	return rv;
8756 }
8757 
8758 /*
8759  * wm_gmii_i82543_writereg:	[mii interface function]
8760  *
8761  *	Write a PHY register on the GMII (i82543 version).
8762  */
8763 static void
8764 wm_gmii_i82543_writereg(device_t self, int phy, int reg, int val)
8765 {
8766 	struct wm_softc *sc = device_private(self);
8767 
8768 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
8769 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
8770 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
8771 	    (MII_COMMAND_START << 30), 32);
8772 }
8773 
8774 /*
8775  * wm_gmii_mdic_readreg:	[mii interface function]
8776  *
8777  *	Read a PHY register on the GMII.
8778  */
8779 static int
8780 wm_gmii_mdic_readreg(device_t self, int phy, int reg)
8781 {
8782 	struct wm_softc *sc = device_private(self);
8783 	uint32_t mdic = 0;
8784 	int i, rv;
8785 
8786 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
8787 	    MDIC_REGADD(reg));
8788 
8789 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8790 		mdic = CSR_READ(sc, WMREG_MDIC);
8791 		if (mdic & MDIC_READY)
8792 			break;
8793 		delay(50);
8794 	}
8795 
8796 	if ((mdic & MDIC_READY) == 0) {
8797 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
8798 		    device_xname(sc->sc_dev), phy, reg);
8799 		rv = 0;
8800 	} else if (mdic & MDIC_E) {
8801 #if 0 /* This is normal if no PHY is present. */
8802 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
8803 		    device_xname(sc->sc_dev), phy, reg);
8804 #endif
8805 		rv = 0;
8806 	} else {
8807 		rv = MDIC_DATA(mdic);
8808 		if (rv == 0xffff)
8809 			rv = 0;
8810 	}
8811 
8812 	return rv;
8813 }
8814 
8815 /*
8816  * wm_gmii_mdic_writereg:	[mii interface function]
8817  *
8818  *	Write a PHY register on the GMII.
8819  */
8820 static void
8821 wm_gmii_mdic_writereg(device_t self, int phy, int reg, int val)
8822 {
8823 	struct wm_softc *sc = device_private(self);
8824 	uint32_t mdic = 0;
8825 	int i;
8826 
8827 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
8828 	    MDIC_REGADD(reg) | MDIC_DATA(val));
8829 
8830 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
8831 		mdic = CSR_READ(sc, WMREG_MDIC);
8832 		if (mdic & MDIC_READY)
8833 			break;
8834 		delay(50);
8835 	}
8836 
8837 	if ((mdic & MDIC_READY) == 0)
8838 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
8839 		    device_xname(sc->sc_dev), phy, reg);
8840 	else if (mdic & MDIC_E)
8841 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
8842 		    device_xname(sc->sc_dev), phy, reg);
8843 }
8844 
8845 /*
8846  * wm_gmii_i82544_readreg:	[mii interface function]
8847  *
8848  *	Read a PHY register on the GMII.
8849  */
8850 static int
8851 wm_gmii_i82544_readreg(device_t self, int phy, int reg)
8852 {
8853 	struct wm_softc *sc = device_private(self);
8854 	int rv;
8855 
8856 	if (sc->phy.acquire(sc)) {
8857 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8858 		    __func__);
8859 		return 0;
8860 	}
8861 	rv = wm_gmii_mdic_readreg(self, phy, reg);
8862 	sc->phy.release(sc);
8863 
8864 	return rv;
8865 }
8866 
8867 /*
8868  * wm_gmii_i82544_writereg:	[mii interface function]
8869  *
8870  *	Write a PHY register on the GMII.
8871  */
8872 static void
8873 wm_gmii_i82544_writereg(device_t self, int phy, int reg, int val)
8874 {
8875 	struct wm_softc *sc = device_private(self);
8876 
8877 	if (sc->phy.acquire(sc)) {
8878 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8879 		    __func__);
8880 	}
8881 	wm_gmii_mdic_writereg(self, phy, reg, val);
8882 	sc->phy.release(sc);
8883 }
8884 
8885 /*
8886  * wm_gmii_i80003_readreg:	[mii interface function]
8887  *
8888  *	Read a PHY register on the kumeran
8889  * This could be handled by the PHY layer if we didn't have to lock the
8890  * ressource ...
8891  */
8892 static int
8893 wm_gmii_i80003_readreg(device_t self, int phy, int reg)
8894 {
8895 	struct wm_softc *sc = device_private(self);
8896 	int rv;
8897 
8898 	if (phy != 1) /* only one PHY on kumeran bus */
8899 		return 0;
8900 
8901 	if (sc->phy.acquire(sc)) {
8902 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8903 		    __func__);
8904 		return 0;
8905 	}
8906 
8907 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8908 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8909 		    reg >> GG82563_PAGE_SHIFT);
8910 	} else {
8911 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8912 		    reg >> GG82563_PAGE_SHIFT);
8913 	}
8914 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
8915 	delay(200);
8916 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
8917 	delay(200);
8918 	sc->phy.release(sc);
8919 
8920 	return rv;
8921 }
8922 
8923 /*
8924  * wm_gmii_i80003_writereg:	[mii interface function]
8925  *
8926  *	Write a PHY register on the kumeran.
8927  * This could be handled by the PHY layer if we didn't have to lock the
8928  * ressource ...
8929  */
8930 static void
8931 wm_gmii_i80003_writereg(device_t self, int phy, int reg, int val)
8932 {
8933 	struct wm_softc *sc = device_private(self);
8934 
8935 	if (phy != 1) /* only one PHY on kumeran bus */
8936 		return;
8937 
8938 	if (sc->phy.acquire(sc)) {
8939 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8940 		    __func__);
8941 		return;
8942 	}
8943 
8944 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG) {
8945 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT,
8946 		    reg >> GG82563_PAGE_SHIFT);
8947 	} else {
8948 		wm_gmii_mdic_writereg(self, phy, GG82563_PHY_PAGE_SELECT_ALT,
8949 		    reg >> GG82563_PAGE_SHIFT);
8950 	}
8951 	/* Wait more 200us for a bug of the ready bit in the MDIC register */
8952 	delay(200);
8953 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
8954 	delay(200);
8955 
8956 	sc->phy.release(sc);
8957 }
8958 
8959 /*
8960  * wm_gmii_bm_readreg:	[mii interface function]
8961  *
8962  *	Read a PHY register on the kumeran
8963  * This could be handled by the PHY layer if we didn't have to lock the
8964  * ressource ...
8965  */
8966 static int
8967 wm_gmii_bm_readreg(device_t self, int phy, int reg)
8968 {
8969 	struct wm_softc *sc = device_private(self);
8970 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
8971 	uint16_t val;
8972 	int rv;
8973 
8974 	if (sc->phy.acquire(sc)) {
8975 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
8976 		    __func__);
8977 		return 0;
8978 	}
8979 
8980 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
8981 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
8982 		    || (reg == 31)) ? 1 : phy;
8983 	/* Page 800 works differently than the rest so it has its own func */
8984 	if (page == BM_WUC_PAGE) {
8985 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
8986 		rv = val;
8987 		goto release;
8988 	}
8989 
8990 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
8991 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
8992 		    && (sc->sc_type != WM_T_82583))
8993 			wm_gmii_mdic_writereg(self, phy,
8994 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
8995 		else
8996 			wm_gmii_mdic_writereg(self, phy,
8997 			    BME1000_PHY_PAGE_SELECT, page);
8998 	}
8999 
9000 	rv = wm_gmii_mdic_readreg(self, phy, reg & MII_ADDRMASK);
9001 
9002 release:
9003 	sc->phy.release(sc);
9004 	return rv;
9005 }
9006 
9007 /*
9008  * wm_gmii_bm_writereg:	[mii interface function]
9009  *
9010  *	Write a PHY register on the kumeran.
9011  * This could be handled by the PHY layer if we didn't have to lock the
9012  * ressource ...
9013  */
9014 static void
9015 wm_gmii_bm_writereg(device_t self, int phy, int reg, int val)
9016 {
9017 	struct wm_softc *sc = device_private(self);
9018 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
9019 
9020 	if (sc->phy.acquire(sc)) {
9021 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9022 		    __func__);
9023 		return;
9024 	}
9025 
9026 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
9027 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
9028 		    || (reg == 31)) ? 1 : phy;
9029 	/* Page 800 works differently than the rest so it has its own func */
9030 	if (page == BM_WUC_PAGE) {
9031 		uint16_t tmp;
9032 
9033 		tmp = val;
9034 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9035 		goto release;
9036 	}
9037 
9038 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
9039 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
9040 		    && (sc->sc_type != WM_T_82583))
9041 			wm_gmii_mdic_writereg(self, phy,
9042 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
9043 		else
9044 			wm_gmii_mdic_writereg(self, phy,
9045 			    BME1000_PHY_PAGE_SELECT, page);
9046 	}
9047 
9048 	wm_gmii_mdic_writereg(self, phy, reg & MII_ADDRMASK, val);
9049 
9050 release:
9051 	sc->phy.release(sc);
9052 }
9053 
9054 static void
9055 wm_access_phy_wakeup_reg_bm(device_t self, int offset, int16_t *val, int rd)
9056 {
9057 	struct wm_softc *sc = device_private(self);
9058 	uint16_t regnum = BM_PHY_REG_NUM(offset);
9059 	uint16_t wuce, reg;
9060 
9061 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9062 		device_xname(sc->sc_dev), __func__));
9063 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
9064 	if (sc->sc_type == WM_T_PCH) {
9065 		/* XXX e1000 driver do nothing... why? */
9066 	}
9067 
9068 	/*
9069 	 * 1) Enable PHY wakeup register first.
9070 	 * See e1000_enable_phy_wakeup_reg_access_bm().
9071 	 */
9072 
9073 	/* Set page 769 */
9074 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9075 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9076 
9077 	/* Read WUCE and save it */
9078 	wuce = wm_gmii_mdic_readreg(self, 1, BM_WUC_ENABLE_REG);
9079 
9080 	reg = wuce | BM_WUC_ENABLE_BIT;
9081 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
9082 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, reg);
9083 
9084 	/* Select page 800 */
9085 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9086 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
9087 
9088 	/*
9089 	 * 2) Access PHY wakeup register.
9090 	 * See e1000_access_phy_wakeup_reg_bm.
9091 	 */
9092 
9093 	/* Write page 800 */
9094 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ADDRESS_OPCODE, regnum);
9095 
9096 	if (rd)
9097 		*val = wm_gmii_mdic_readreg(self, 1, BM_WUC_DATA_OPCODE);
9098 	else
9099 		wm_gmii_mdic_writereg(self, 1, BM_WUC_DATA_OPCODE, *val);
9100 
9101 	/*
9102 	 * 3) Disable PHY wakeup register.
9103 	 * See e1000_disable_phy_wakeup_reg_access_bm().
9104 	 */
9105 	/* Set page 769 */
9106 	wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9107 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
9108 
9109 	wm_gmii_mdic_writereg(self, 1, BM_WUC_ENABLE_REG, wuce);
9110 }
9111 
9112 /*
9113  * wm_gmii_hv_readreg:	[mii interface function]
9114  *
9115  *	Read a PHY register on the kumeran
9116  * This could be handled by the PHY layer if we didn't have to lock the
9117  * ressource ...
9118  */
9119 static int
9120 wm_gmii_hv_readreg(device_t self, int phy, int reg)
9121 {
9122 	struct wm_softc *sc = device_private(self);
9123 	int rv;
9124 
9125 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9126 		device_xname(sc->sc_dev), __func__));
9127 	if (sc->phy.acquire(sc)) {
9128 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9129 		    __func__);
9130 		return 0;
9131 	}
9132 
9133 	rv = wm_gmii_hv_readreg_locked(self, phy, reg);
9134 	sc->phy.release(sc);
9135 	return rv;
9136 }
9137 
9138 static int
9139 wm_gmii_hv_readreg_locked(device_t self, int phy, int reg)
9140 {
9141 	uint16_t page = BM_PHY_REG_PAGE(reg);
9142 	uint16_t regnum = BM_PHY_REG_NUM(reg);
9143 	uint16_t val;
9144 	int rv;
9145 
9146 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9147 
9148 	/* Page 800 works differently than the rest so it has its own func */
9149 	if (page == BM_WUC_PAGE) {
9150 		wm_access_phy_wakeup_reg_bm(self, reg, &val, 1);
9151 		return val;
9152 	}
9153 
9154 	/*
9155 	 * Lower than page 768 works differently than the rest so it has its
9156 	 * own func
9157 	 */
9158 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9159 		printf("gmii_hv_readreg!!!\n");
9160 		return 0;
9161 	}
9162 
9163 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9164 		wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9165 		    page << BME1000_PAGE_SHIFT);
9166 	}
9167 
9168 	rv = wm_gmii_mdic_readreg(self, phy, regnum & MII_ADDRMASK);
9169 	return rv;
9170 }
9171 
9172 /*
9173  * wm_gmii_hv_writereg:	[mii interface function]
9174  *
9175  *	Write a PHY register on the kumeran.
9176  * This could be handled by the PHY layer if we didn't have to lock the
9177  * ressource ...
9178  */
9179 static void
9180 wm_gmii_hv_writereg(device_t self, int phy, int reg, int val)
9181 {
9182 	struct wm_softc *sc = device_private(self);
9183 
9184 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9185 		device_xname(sc->sc_dev), __func__));
9186 
9187 	if (sc->phy.acquire(sc)) {
9188 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9189 		    __func__);
9190 		return;
9191 	}
9192 
9193 	wm_gmii_hv_writereg_locked(self, phy, reg, val);
9194 	sc->phy.release(sc);
9195 }
9196 
9197 static void
9198 wm_gmii_hv_writereg_locked(device_t self, int phy, int reg, int val)
9199 {
9200 	struct wm_softc *sc = device_private(self);
9201 	uint16_t page = BM_PHY_REG_PAGE(reg);
9202 	uint16_t regnum = BM_PHY_REG_NUM(reg);
9203 
9204 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
9205 
9206 	/* Page 800 works differently than the rest so it has its own func */
9207 	if (page == BM_WUC_PAGE) {
9208 		uint16_t tmp;
9209 
9210 		tmp = val;
9211 		wm_access_phy_wakeup_reg_bm(self, reg, &tmp, 0);
9212 		return;
9213 	}
9214 
9215 	/*
9216 	 * Lower than page 768 works differently than the rest so it has its
9217 	 * own func
9218 	 */
9219 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
9220 		printf("gmii_hv_writereg!!!\n");
9221 		return;
9222 	}
9223 
9224 	{
9225 		/*
9226 		 * XXX Workaround MDIO accesses being disabled after entering
9227 		 * IEEE Power Down (whenever bit 11 of the PHY control
9228 		 * register is set)
9229 		 */
9230 		if (sc->sc_phytype == WMPHY_82578) {
9231 			struct mii_softc *child;
9232 
9233 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
9234 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
9235 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
9236 			    && ((val & (1 << 11)) != 0)) {
9237 				printf("XXX need workaround\n");
9238 			}
9239 		}
9240 
9241 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
9242 			wm_gmii_mdic_writereg(self, 1, MII_IGPHY_PAGE_SELECT,
9243 			    page << BME1000_PAGE_SHIFT);
9244 		}
9245 	}
9246 
9247 	wm_gmii_mdic_writereg(self, phy, regnum & MII_ADDRMASK, val);
9248 }
9249 
9250 /*
9251  * wm_gmii_82580_readreg:	[mii interface function]
9252  *
9253  *	Read a PHY register on the 82580 and I350.
9254  * This could be handled by the PHY layer if we didn't have to lock the
9255  * ressource ...
9256  */
9257 static int
9258 wm_gmii_82580_readreg(device_t self, int phy, int reg)
9259 {
9260 	struct wm_softc *sc = device_private(self);
9261 	int rv;
9262 
9263 	if (sc->phy.acquire(sc) != 0) {
9264 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9265 		    __func__);
9266 		return 0;
9267 	}
9268 
9269 	rv = wm_gmii_mdic_readreg(self, phy, reg);
9270 
9271 	sc->phy.release(sc);
9272 	return rv;
9273 }
9274 
9275 /*
9276  * wm_gmii_82580_writereg:	[mii interface function]
9277  *
9278  *	Write a PHY register on the 82580 and I350.
9279  * This could be handled by the PHY layer if we didn't have to lock the
9280  * ressource ...
9281  */
9282 static void
9283 wm_gmii_82580_writereg(device_t self, int phy, int reg, int val)
9284 {
9285 	struct wm_softc *sc = device_private(self);
9286 
9287 	if (sc->phy.acquire(sc) != 0) {
9288 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9289 		    __func__);
9290 		return;
9291 	}
9292 
9293 	wm_gmii_mdic_writereg(self, phy, reg, val);
9294 
9295 	sc->phy.release(sc);
9296 }
9297 
9298 /*
9299  * wm_gmii_gs40g_readreg:	[mii interface function]
9300  *
9301  *	Read a PHY register on the I2100 and I211.
9302  * This could be handled by the PHY layer if we didn't have to lock the
9303  * ressource ...
9304  */
9305 static int
9306 wm_gmii_gs40g_readreg(device_t self, int phy, int reg)
9307 {
9308 	struct wm_softc *sc = device_private(self);
9309 	int page, offset;
9310 	int rv;
9311 
9312 	/* Acquire semaphore */
9313 	if (sc->phy.acquire(sc)) {
9314 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9315 		    __func__);
9316 		return 0;
9317 	}
9318 
9319 	/* Page select */
9320 	page = reg >> GS40G_PAGE_SHIFT;
9321 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9322 
9323 	/* Read reg */
9324 	offset = reg & GS40G_OFFSET_MASK;
9325 	rv = wm_gmii_mdic_readreg(self, phy, offset);
9326 
9327 	sc->phy.release(sc);
9328 	return rv;
9329 }
9330 
9331 /*
9332  * wm_gmii_gs40g_writereg:	[mii interface function]
9333  *
9334  *	Write a PHY register on the I210 and I211.
9335  * This could be handled by the PHY layer if we didn't have to lock the
9336  * ressource ...
9337  */
9338 static void
9339 wm_gmii_gs40g_writereg(device_t self, int phy, int reg, int val)
9340 {
9341 	struct wm_softc *sc = device_private(self);
9342 	int page, offset;
9343 
9344 	/* Acquire semaphore */
9345 	if (sc->phy.acquire(sc)) {
9346 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9347 		    __func__);
9348 		return;
9349 	}
9350 
9351 	/* Page select */
9352 	page = reg >> GS40G_PAGE_SHIFT;
9353 	wm_gmii_mdic_writereg(self, phy, GS40G_PAGE_SELECT, page);
9354 
9355 	/* Write reg */
9356 	offset = reg & GS40G_OFFSET_MASK;
9357 	wm_gmii_mdic_writereg(self, phy, offset, val);
9358 
9359 	/* Release semaphore */
9360 	sc->phy.release(sc);
9361 }
9362 
9363 /*
9364  * wm_gmii_statchg:	[mii interface function]
9365  *
9366  *	Callback from MII layer when media changes.
9367  */
9368 static void
9369 wm_gmii_statchg(struct ifnet *ifp)
9370 {
9371 	struct wm_softc *sc = ifp->if_softc;
9372 	struct mii_data *mii = &sc->sc_mii;
9373 
9374 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
9375 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9376 	sc->sc_fcrtl &= ~FCRTL_XONE;
9377 
9378 	/*
9379 	 * Get flow control negotiation result.
9380 	 */
9381 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
9382 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
9383 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
9384 		mii->mii_media_active &= ~IFM_ETH_FMASK;
9385 	}
9386 
9387 	if (sc->sc_flowflags & IFM_FLOW) {
9388 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
9389 			sc->sc_ctrl |= CTRL_TFCE;
9390 			sc->sc_fcrtl |= FCRTL_XONE;
9391 		}
9392 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
9393 			sc->sc_ctrl |= CTRL_RFCE;
9394 	}
9395 
9396 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
9397 		DPRINTF(WM_DEBUG_LINK,
9398 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
9399 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9400 	} else {
9401 		DPRINTF(WM_DEBUG_LINK,
9402 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
9403 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9404 	}
9405 
9406 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9407 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9408 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
9409 						 : WMREG_FCRTL, sc->sc_fcrtl);
9410 	if (sc->sc_type == WM_T_80003) {
9411 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
9412 		case IFM_1000_T:
9413 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9414 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
9415 			sc->sc_tipg =  TIPG_1000T_80003_DFLT;
9416 			break;
9417 		default:
9418 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
9419 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
9420 			sc->sc_tipg =  TIPG_10_100_80003_DFLT;
9421 			break;
9422 		}
9423 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
9424 	}
9425 }
9426 
9427 /* kumeran related (80003, ICH* and PCH*) */
9428 
9429 /*
9430  * wm_kmrn_readreg:
9431  *
9432  *	Read a kumeran register
9433  */
9434 static int
9435 wm_kmrn_readreg(struct wm_softc *sc, int reg)
9436 {
9437 	int rv;
9438 
9439 	if (sc->sc_type == WM_T_80003)
9440 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9441 	else
9442 		rv = sc->phy.acquire(sc);
9443 	if (rv != 0) {
9444 		aprint_error_dev(sc->sc_dev,
9445 		    "%s: failed to get semaphore\n", __func__);
9446 		return 0;
9447 	}
9448 
9449 	rv = wm_kmrn_readreg_locked(sc, reg);
9450 
9451 	if (sc->sc_type == WM_T_80003)
9452 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9453 	else
9454 		sc->phy.release(sc);
9455 
9456 	return rv;
9457 }
9458 
9459 static int
9460 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg)
9461 {
9462 	int rv;
9463 
9464 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9465 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9466 	    KUMCTRLSTA_REN);
9467 	CSR_WRITE_FLUSH(sc);
9468 	delay(2);
9469 
9470 	rv = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
9471 
9472 	return rv;
9473 }
9474 
9475 /*
9476  * wm_kmrn_writereg:
9477  *
9478  *	Write a kumeran register
9479  */
9480 static void
9481 wm_kmrn_writereg(struct wm_softc *sc, int reg, int val)
9482 {
9483 	int rv;
9484 
9485 	if (sc->sc_type == WM_T_80003)
9486 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9487 	else
9488 		rv = sc->phy.acquire(sc);
9489 	if (rv != 0) {
9490 		aprint_error_dev(sc->sc_dev,
9491 		    "%s: failed to get semaphore\n", __func__);
9492 		return;
9493 	}
9494 
9495 	wm_kmrn_writereg_locked(sc, reg, val);
9496 
9497 	if (sc->sc_type == WM_T_80003)
9498 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
9499 	else
9500 		sc->phy.release(sc);
9501 }
9502 
9503 static void
9504 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, int val)
9505 {
9506 
9507 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
9508 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
9509 	    (val & KUMCTRLSTA_MASK));
9510 }
9511 
9512 /* SGMII related */
9513 
9514 /*
9515  * wm_sgmii_uses_mdio
9516  *
9517  * Check whether the transaction is to the internal PHY or the external
9518  * MDIO interface. Return true if it's MDIO.
9519  */
9520 static bool
9521 wm_sgmii_uses_mdio(struct wm_softc *sc)
9522 {
9523 	uint32_t reg;
9524 	bool ismdio = false;
9525 
9526 	switch (sc->sc_type) {
9527 	case WM_T_82575:
9528 	case WM_T_82576:
9529 		reg = CSR_READ(sc, WMREG_MDIC);
9530 		ismdio = ((reg & MDIC_DEST) != 0);
9531 		break;
9532 	case WM_T_82580:
9533 	case WM_T_I350:
9534 	case WM_T_I354:
9535 	case WM_T_I210:
9536 	case WM_T_I211:
9537 		reg = CSR_READ(sc, WMREG_MDICNFG);
9538 		ismdio = ((reg & MDICNFG_DEST) != 0);
9539 		break;
9540 	default:
9541 		break;
9542 	}
9543 
9544 	return ismdio;
9545 }
9546 
9547 /*
9548  * wm_sgmii_readreg:	[mii interface function]
9549  *
9550  *	Read a PHY register on the SGMII
9551  * This could be handled by the PHY layer if we didn't have to lock the
9552  * ressource ...
9553  */
9554 static int
9555 wm_sgmii_readreg(device_t self, int phy, int reg)
9556 {
9557 	struct wm_softc *sc = device_private(self);
9558 	uint32_t i2ccmd;
9559 	int i, rv;
9560 
9561 	if (sc->phy.acquire(sc)) {
9562 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9563 		    __func__);
9564 		return 0;
9565 	}
9566 
9567 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9568 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9569 	    | I2CCMD_OPCODE_READ;
9570 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9571 
9572 	/* Poll the ready bit */
9573 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9574 		delay(50);
9575 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9576 		if (i2ccmd & I2CCMD_READY)
9577 			break;
9578 	}
9579 	if ((i2ccmd & I2CCMD_READY) == 0)
9580 		aprint_error_dev(sc->sc_dev, "I2CCMD Read did not complete\n");
9581 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9582 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9583 
9584 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
9585 
9586 	sc->phy.release(sc);
9587 	return rv;
9588 }
9589 
9590 /*
9591  * wm_sgmii_writereg:	[mii interface function]
9592  *
9593  *	Write a PHY register on the SGMII.
9594  * This could be handled by the PHY layer if we didn't have to lock the
9595  * ressource ...
9596  */
9597 static void
9598 wm_sgmii_writereg(device_t self, int phy, int reg, int val)
9599 {
9600 	struct wm_softc *sc = device_private(self);
9601 	uint32_t i2ccmd;
9602 	int i;
9603 	int val_swapped;
9604 
9605 	if (sc->phy.acquire(sc) != 0) {
9606 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9607 		    __func__);
9608 		return;
9609 	}
9610 	/* Swap the data bytes for the I2C interface */
9611 	val_swapped = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
9612 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
9613 	    | (phy << I2CCMD_PHY_ADDR_SHIFT)
9614 	    | I2CCMD_OPCODE_WRITE | val_swapped;
9615 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
9616 
9617 	/* Poll the ready bit */
9618 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
9619 		delay(50);
9620 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
9621 		if (i2ccmd & I2CCMD_READY)
9622 			break;
9623 	}
9624 	if ((i2ccmd & I2CCMD_READY) == 0)
9625 		aprint_error_dev(sc->sc_dev, "I2CCMD Write did not complete\n");
9626 	if ((i2ccmd & I2CCMD_ERROR) != 0)
9627 		aprint_error_dev(sc->sc_dev, "I2CCMD Error bit set\n");
9628 
9629 	sc->phy.release(sc);
9630 }
9631 
9632 /* TBI related */
9633 
9634 /*
9635  * wm_tbi_mediainit:
9636  *
9637  *	Initialize media for use on 1000BASE-X devices.
9638  */
9639 static void
9640 wm_tbi_mediainit(struct wm_softc *sc)
9641 {
9642 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9643 	const char *sep = "";
9644 
9645 	if (sc->sc_type < WM_T_82543)
9646 		sc->sc_tipg = TIPG_WM_DFLT;
9647 	else
9648 		sc->sc_tipg = TIPG_LG_DFLT;
9649 
9650 	sc->sc_tbi_serdes_anegticks = 5;
9651 
9652 	/* Initialize our media structures */
9653 	sc->sc_mii.mii_ifp = ifp;
9654 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9655 
9656 	if ((sc->sc_type >= WM_T_82575)
9657 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
9658 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9659 		    wm_serdes_mediachange, wm_serdes_mediastatus);
9660 	else
9661 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
9662 		    wm_tbi_mediachange, wm_tbi_mediastatus);
9663 
9664 	/*
9665 	 * SWD Pins:
9666 	 *
9667 	 *	0 = Link LED (output)
9668 	 *	1 = Loss Of Signal (input)
9669 	 */
9670 	sc->sc_ctrl |= CTRL_SWDPIO(0);
9671 
9672 	/* XXX Perhaps this is only for TBI */
9673 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
9674 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
9675 
9676 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
9677 		sc->sc_ctrl &= ~CTRL_LRST;
9678 
9679 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9680 
9681 #define	ADD(ss, mm, dd)							\
9682 do {									\
9683 	aprint_normal("%s%s", sep, ss);					\
9684 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
9685 	sep = ", ";							\
9686 } while (/*CONSTCOND*/0)
9687 
9688 	aprint_normal_dev(sc->sc_dev, "");
9689 
9690 	if (sc->sc_type == WM_T_I354) {
9691 		uint32_t status;
9692 
9693 		status = CSR_READ(sc, WMREG_STATUS);
9694 		if (((status & STATUS_2P5_SKU) != 0)
9695 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
9696 			ADD("2500baseKX-FDX", IFM_2500_SX | IFM_FDX,ANAR_X_FD);
9697 		} else
9698 			ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX,ANAR_X_FD);
9699 	} else if (sc->sc_type == WM_T_82545) {
9700 		/* Only 82545 is LX (XXX except SFP) */
9701 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
9702 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
9703 	} else {
9704 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
9705 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
9706 	}
9707 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
9708 	aprint_normal("\n");
9709 
9710 #undef ADD
9711 
9712 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
9713 }
9714 
9715 /*
9716  * wm_tbi_mediachange:	[ifmedia interface function]
9717  *
9718  *	Set hardware to newly-selected media on a 1000BASE-X device.
9719  */
9720 static int
9721 wm_tbi_mediachange(struct ifnet *ifp)
9722 {
9723 	struct wm_softc *sc = ifp->if_softc;
9724 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9725 	uint32_t status;
9726 	int i;
9727 
9728 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9729 		/* XXX need some work for >= 82571 and < 82575 */
9730 		if (sc->sc_type < WM_T_82575)
9731 			return 0;
9732 	}
9733 
9734 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
9735 	    || (sc->sc_type >= WM_T_82575))
9736 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
9737 
9738 	sc->sc_ctrl &= ~CTRL_LRST;
9739 	sc->sc_txcw = TXCW_ANE;
9740 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9741 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
9742 	else if (ife->ifm_media & IFM_FDX)
9743 		sc->sc_txcw |= TXCW_FD;
9744 	else
9745 		sc->sc_txcw |= TXCW_HD;
9746 
9747 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
9748 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
9749 
9750 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
9751 		    device_xname(sc->sc_dev), sc->sc_txcw));
9752 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9753 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9754 	CSR_WRITE_FLUSH(sc);
9755 	delay(1000);
9756 
9757 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
9758 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
9759 
9760 	/*
9761 	 * On 82544 chips and later, the CTRL_SWDPIN(1) bit will be set if the
9762 	 * optics detect a signal, 0 if they don't.
9763 	 */
9764 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
9765 		/* Have signal; wait for the link to come up. */
9766 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
9767 			delay(10000);
9768 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
9769 				break;
9770 		}
9771 
9772 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
9773 			    device_xname(sc->sc_dev),i));
9774 
9775 		status = CSR_READ(sc, WMREG_STATUS);
9776 		DPRINTF(WM_DEBUG_LINK,
9777 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
9778 			device_xname(sc->sc_dev),status, STATUS_LU));
9779 		if (status & STATUS_LU) {
9780 			/* Link is up. */
9781 			DPRINTF(WM_DEBUG_LINK,
9782 			    ("%s: LINK: set media -> link up %s\n",
9783 			    device_xname(sc->sc_dev),
9784 			    (status & STATUS_FD) ? "FDX" : "HDX"));
9785 
9786 			/*
9787 			 * NOTE: CTRL will update TFCE and RFCE automatically,
9788 			 * so we should update sc->sc_ctrl
9789 			 */
9790 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
9791 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
9792 			sc->sc_fcrtl &= ~FCRTL_XONE;
9793 			if (status & STATUS_FD)
9794 				sc->sc_tctl |=
9795 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
9796 			else
9797 				sc->sc_tctl |=
9798 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
9799 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
9800 				sc->sc_fcrtl |= FCRTL_XONE;
9801 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
9802 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
9803 				      WMREG_OLD_FCRTL : WMREG_FCRTL,
9804 				      sc->sc_fcrtl);
9805 			sc->sc_tbi_linkup = 1;
9806 		} else {
9807 			if (i == WM_LINKUP_TIMEOUT)
9808 				wm_check_for_link(sc);
9809 			/* Link is down. */
9810 			DPRINTF(WM_DEBUG_LINK,
9811 			    ("%s: LINK: set media -> link down\n",
9812 			    device_xname(sc->sc_dev)));
9813 			sc->sc_tbi_linkup = 0;
9814 		}
9815 	} else {
9816 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
9817 		    device_xname(sc->sc_dev)));
9818 		sc->sc_tbi_linkup = 0;
9819 	}
9820 
9821 	wm_tbi_serdes_set_linkled(sc);
9822 
9823 	return 0;
9824 }
9825 
9826 /*
9827  * wm_tbi_mediastatus:	[ifmedia interface function]
9828  *
9829  *	Get the current interface media status on a 1000BASE-X device.
9830  */
9831 static void
9832 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9833 {
9834 	struct wm_softc *sc = ifp->if_softc;
9835 	uint32_t ctrl, status;
9836 
9837 	ifmr->ifm_status = IFM_AVALID;
9838 	ifmr->ifm_active = IFM_ETHER;
9839 
9840 	status = CSR_READ(sc, WMREG_STATUS);
9841 	if ((status & STATUS_LU) == 0) {
9842 		ifmr->ifm_active |= IFM_NONE;
9843 		return;
9844 	}
9845 
9846 	ifmr->ifm_status |= IFM_ACTIVE;
9847 	/* Only 82545 is LX */
9848 	if (sc->sc_type == WM_T_82545)
9849 		ifmr->ifm_active |= IFM_1000_LX;
9850 	else
9851 		ifmr->ifm_active |= IFM_1000_SX;
9852 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
9853 		ifmr->ifm_active |= IFM_FDX;
9854 	else
9855 		ifmr->ifm_active |= IFM_HDX;
9856 	ctrl = CSR_READ(sc, WMREG_CTRL);
9857 	if (ctrl & CTRL_RFCE)
9858 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
9859 	if (ctrl & CTRL_TFCE)
9860 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
9861 }
9862 
9863 /* XXX TBI only */
9864 static int
9865 wm_check_for_link(struct wm_softc *sc)
9866 {
9867 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9868 	uint32_t rxcw;
9869 	uint32_t ctrl;
9870 	uint32_t status;
9871 	uint32_t sig;
9872 
9873 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
9874 		/* XXX need some work for >= 82571 */
9875 		if (sc->sc_type >= WM_T_82571) {
9876 			sc->sc_tbi_linkup = 1;
9877 			return 0;
9878 		}
9879 	}
9880 
9881 	rxcw = CSR_READ(sc, WMREG_RXCW);
9882 	ctrl = CSR_READ(sc, WMREG_CTRL);
9883 	status = CSR_READ(sc, WMREG_STATUS);
9884 
9885 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
9886 
9887 	DPRINTF(WM_DEBUG_LINK,
9888 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
9889 		device_xname(sc->sc_dev), __func__,
9890 		((ctrl & CTRL_SWDPIN(1)) == sig),
9891 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
9892 
9893 	/*
9894 	 * SWDPIN   LU RXCW
9895 	 *      0    0    0
9896 	 *      0    0    1	(should not happen)
9897 	 *      0    1    0	(should not happen)
9898 	 *      0    1    1	(should not happen)
9899 	 *      1    0    0	Disable autonego and force linkup
9900 	 *      1    0    1	got /C/ but not linkup yet
9901 	 *      1    1    0	(linkup)
9902 	 *      1    1    1	If IFM_AUTO, back to autonego
9903 	 *
9904 	 */
9905 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
9906 	    && ((status & STATUS_LU) == 0)
9907 	    && ((rxcw & RXCW_C) == 0)) {
9908 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
9909 			__func__));
9910 		sc->sc_tbi_linkup = 0;
9911 		/* Disable auto-negotiation in the TXCW register */
9912 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
9913 
9914 		/*
9915 		 * Force link-up and also force full-duplex.
9916 		 *
9917 		 * NOTE: CTRL was updated TFCE and RFCE automatically,
9918 		 * so we should update sc->sc_ctrl
9919 		 */
9920 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
9921 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9922 	} else if (((status & STATUS_LU) != 0)
9923 	    && ((rxcw & RXCW_C) != 0)
9924 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
9925 		sc->sc_tbi_linkup = 1;
9926 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
9927 			__func__));
9928 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
9929 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
9930 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
9931 	    && ((rxcw & RXCW_C) != 0)) {
9932 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
9933 	} else {
9934 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
9935 			status));
9936 	}
9937 
9938 	return 0;
9939 }
9940 
9941 /*
9942  * wm_tbi_tick:
9943  *
9944  *	Check the link on TBI devices.
9945  *	This function acts as mii_tick().
9946  */
9947 static void
9948 wm_tbi_tick(struct wm_softc *sc)
9949 {
9950 	struct mii_data *mii = &sc->sc_mii;
9951 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
9952 	uint32_t status;
9953 
9954 	KASSERT(WM_CORE_LOCKED(sc));
9955 
9956 	status = CSR_READ(sc, WMREG_STATUS);
9957 
9958 	/* XXX is this needed? */
9959 	(void)CSR_READ(sc, WMREG_RXCW);
9960 	(void)CSR_READ(sc, WMREG_CTRL);
9961 
9962 	/* set link status */
9963 	if ((status & STATUS_LU) == 0) {
9964 		DPRINTF(WM_DEBUG_LINK,
9965 		    ("%s: LINK: checklink -> down\n",
9966 			device_xname(sc->sc_dev)));
9967 		sc->sc_tbi_linkup = 0;
9968 	} else if (sc->sc_tbi_linkup == 0) {
9969 		DPRINTF(WM_DEBUG_LINK,
9970 		    ("%s: LINK: checklink -> up %s\n",
9971 			device_xname(sc->sc_dev),
9972 			(status & STATUS_FD) ? "FDX" : "HDX"));
9973 		sc->sc_tbi_linkup = 1;
9974 		sc->sc_tbi_serdes_ticks = 0;
9975 	}
9976 
9977 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
9978 		goto setled;
9979 
9980 	if ((status & STATUS_LU) == 0) {
9981 		sc->sc_tbi_linkup = 0;
9982 		/* If the timer expired, retry autonegotiation */
9983 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9984 		    && (++sc->sc_tbi_serdes_ticks
9985 			>= sc->sc_tbi_serdes_anegticks)) {
9986 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
9987 			sc->sc_tbi_serdes_ticks = 0;
9988 			/*
9989 			 * Reset the link, and let autonegotiation do
9990 			 * its thing
9991 			 */
9992 			sc->sc_ctrl |= CTRL_LRST;
9993 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9994 			CSR_WRITE_FLUSH(sc);
9995 			delay(1000);
9996 			sc->sc_ctrl &= ~CTRL_LRST;
9997 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9998 			CSR_WRITE_FLUSH(sc);
9999 			delay(1000);
10000 			CSR_WRITE(sc, WMREG_TXCW,
10001 			    sc->sc_txcw & ~TXCW_ANE);
10002 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
10003 		}
10004 	}
10005 
10006 setled:
10007 	wm_tbi_serdes_set_linkled(sc);
10008 }
10009 
10010 /* SERDES related */
10011 static void
10012 wm_serdes_power_up_link_82575(struct wm_softc *sc)
10013 {
10014 	uint32_t reg;
10015 
10016 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
10017 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
10018 		return;
10019 
10020 	reg = CSR_READ(sc, WMREG_PCS_CFG);
10021 	reg |= PCS_CFG_PCS_EN;
10022 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
10023 
10024 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
10025 	reg &= ~CTRL_EXT_SWDPIN(3);
10026 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
10027 	CSR_WRITE_FLUSH(sc);
10028 }
10029 
10030 static int
10031 wm_serdes_mediachange(struct ifnet *ifp)
10032 {
10033 	struct wm_softc *sc = ifp->if_softc;
10034 	bool pcs_autoneg = true; /* XXX */
10035 	uint32_t ctrl_ext, pcs_lctl, reg;
10036 
10037 	/* XXX Currently, this function is not called on 8257[12] */
10038 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
10039 	    || (sc->sc_type >= WM_T_82575))
10040 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
10041 
10042 	wm_serdes_power_up_link_82575(sc);
10043 
10044 	sc->sc_ctrl |= CTRL_SLU;
10045 
10046 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
10047 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
10048 
10049 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10050 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
10051 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
10052 	case CTRL_EXT_LINK_MODE_SGMII:
10053 		pcs_autoneg = true;
10054 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
10055 		break;
10056 	case CTRL_EXT_LINK_MODE_1000KX:
10057 		pcs_autoneg = false;
10058 		/* FALLTHROUGH */
10059 	default:
10060 		if ((sc->sc_type == WM_T_82575)
10061 		    || (sc->sc_type == WM_T_82576)) {
10062 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
10063 				pcs_autoneg = false;
10064 		}
10065 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
10066 		    | CTRL_FRCFDX;
10067 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
10068 	}
10069 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10070 
10071 	if (pcs_autoneg) {
10072 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
10073 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
10074 
10075 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
10076 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
10077 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
10078 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
10079 	} else
10080 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
10081 
10082 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
10083 
10084 
10085 	return 0;
10086 }
10087 
10088 static void
10089 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
10090 {
10091 	struct wm_softc *sc = ifp->if_softc;
10092 	struct mii_data *mii = &sc->sc_mii;
10093 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
10094 	uint32_t pcs_adv, pcs_lpab, reg;
10095 
10096 	ifmr->ifm_status = IFM_AVALID;
10097 	ifmr->ifm_active = IFM_ETHER;
10098 
10099 	/* Check PCS */
10100 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
10101 	if ((reg & PCS_LSTS_LINKOK) == 0) {
10102 		ifmr->ifm_active |= IFM_NONE;
10103 		sc->sc_tbi_linkup = 0;
10104 		goto setled;
10105 	}
10106 
10107 	sc->sc_tbi_linkup = 1;
10108 	ifmr->ifm_status |= IFM_ACTIVE;
10109 	if (sc->sc_type == WM_T_I354) {
10110 		uint32_t status;
10111 
10112 		status = CSR_READ(sc, WMREG_STATUS);
10113 		if (((status & STATUS_2P5_SKU) != 0)
10114 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
10115 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
10116 		} else
10117 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
10118 	} else {
10119 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
10120 		case PCS_LSTS_SPEED_10:
10121 			ifmr->ifm_active |= IFM_10_T; /* XXX */
10122 			break;
10123 		case PCS_LSTS_SPEED_100:
10124 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
10125 			break;
10126 		case PCS_LSTS_SPEED_1000:
10127 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10128 			break;
10129 		default:
10130 			device_printf(sc->sc_dev, "Unknown speed\n");
10131 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
10132 			break;
10133 		}
10134 	}
10135 	if ((reg & PCS_LSTS_FDX) != 0)
10136 		ifmr->ifm_active |= IFM_FDX;
10137 	else
10138 		ifmr->ifm_active |= IFM_HDX;
10139 	mii->mii_media_active &= ~IFM_ETH_FMASK;
10140 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
10141 		/* Check flow */
10142 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
10143 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
10144 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
10145 			goto setled;
10146 		}
10147 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
10148 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
10149 		DPRINTF(WM_DEBUG_LINK,
10150 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
10151 		if ((pcs_adv & TXCW_SYM_PAUSE)
10152 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
10153 			mii->mii_media_active |= IFM_FLOW
10154 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
10155 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
10156 		    && (pcs_adv & TXCW_ASYM_PAUSE)
10157 		    && (pcs_lpab & TXCW_SYM_PAUSE)
10158 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10159 			mii->mii_media_active |= IFM_FLOW
10160 			    | IFM_ETH_TXPAUSE;
10161 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
10162 		    && (pcs_adv & TXCW_ASYM_PAUSE)
10163 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
10164 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
10165 			mii->mii_media_active |= IFM_FLOW
10166 			    | IFM_ETH_RXPAUSE;
10167 		}
10168 	}
10169 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
10170 	    | (mii->mii_media_active & IFM_ETH_FMASK);
10171 setled:
10172 	wm_tbi_serdes_set_linkled(sc);
10173 }
10174 
10175 /*
10176  * wm_serdes_tick:
10177  *
10178  *	Check the link on serdes devices.
10179  */
10180 static void
10181 wm_serdes_tick(struct wm_softc *sc)
10182 {
10183 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10184 	struct mii_data *mii = &sc->sc_mii;
10185 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
10186 	uint32_t reg;
10187 
10188 	KASSERT(WM_CORE_LOCKED(sc));
10189 
10190 	mii->mii_media_status = IFM_AVALID;
10191 	mii->mii_media_active = IFM_ETHER;
10192 
10193 	/* Check PCS */
10194 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
10195 	if ((reg & PCS_LSTS_LINKOK) != 0) {
10196 		mii->mii_media_status |= IFM_ACTIVE;
10197 		sc->sc_tbi_linkup = 1;
10198 		sc->sc_tbi_serdes_ticks = 0;
10199 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
10200 		if ((reg & PCS_LSTS_FDX) != 0)
10201 			mii->mii_media_active |= IFM_FDX;
10202 		else
10203 			mii->mii_media_active |= IFM_HDX;
10204 	} else {
10205 		mii->mii_media_status |= IFM_NONE;
10206 		sc->sc_tbi_linkup = 0;
10207 		/* If the timer expired, retry autonegotiation */
10208 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
10209 		    && (++sc->sc_tbi_serdes_ticks
10210 			>= sc->sc_tbi_serdes_anegticks)) {
10211 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
10212 			sc->sc_tbi_serdes_ticks = 0;
10213 			/* XXX */
10214 			wm_serdes_mediachange(ifp);
10215 		}
10216 	}
10217 
10218 	wm_tbi_serdes_set_linkled(sc);
10219 }
10220 
10221 /* SFP related */
10222 
10223 static int
10224 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
10225 {
10226 	uint32_t i2ccmd;
10227 	int i;
10228 
10229 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
10230 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10231 
10232 	/* Poll the ready bit */
10233 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10234 		delay(50);
10235 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10236 		if (i2ccmd & I2CCMD_READY)
10237 			break;
10238 	}
10239 	if ((i2ccmd & I2CCMD_READY) == 0)
10240 		return -1;
10241 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10242 		return -1;
10243 
10244 	*data = i2ccmd & 0x00ff;
10245 
10246 	return 0;
10247 }
10248 
10249 static uint32_t
10250 wm_sfp_get_media_type(struct wm_softc *sc)
10251 {
10252 	uint32_t ctrl_ext;
10253 	uint8_t val = 0;
10254 	int timeout = 3;
10255 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
10256 	int rv = -1;
10257 
10258 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
10259 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
10260 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
10261 	CSR_WRITE_FLUSH(sc);
10262 
10263 	/* Read SFP module data */
10264 	while (timeout) {
10265 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
10266 		if (rv == 0)
10267 			break;
10268 		delay(100*1000); /* XXX too big */
10269 		timeout--;
10270 	}
10271 	if (rv != 0)
10272 		goto out;
10273 	switch (val) {
10274 	case SFF_SFP_ID_SFF:
10275 		aprint_normal_dev(sc->sc_dev,
10276 		    "Module/Connector soldered to board\n");
10277 		break;
10278 	case SFF_SFP_ID_SFP:
10279 		aprint_normal_dev(sc->sc_dev, "SFP\n");
10280 		break;
10281 	case SFF_SFP_ID_UNKNOWN:
10282 		goto out;
10283 	default:
10284 		break;
10285 	}
10286 
10287 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
10288 	if (rv != 0) {
10289 		goto out;
10290 	}
10291 
10292 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
10293 		mediatype = WM_MEDIATYPE_SERDES;
10294 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0){
10295 		sc->sc_flags |= WM_F_SGMII;
10296 		mediatype = WM_MEDIATYPE_COPPER;
10297 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0){
10298 		sc->sc_flags |= WM_F_SGMII;
10299 		mediatype = WM_MEDIATYPE_SERDES;
10300 	}
10301 
10302 out:
10303 	/* Restore I2C interface setting */
10304 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
10305 
10306 	return mediatype;
10307 }
10308 
10309 /*
10310  * NVM related.
10311  * Microwire, SPI (w/wo EERD) and Flash.
10312  */
10313 
10314 /* Both spi and uwire */
10315 
10316 /*
10317  * wm_eeprom_sendbits:
10318  *
10319  *	Send a series of bits to the EEPROM.
10320  */
10321 static void
10322 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
10323 {
10324 	uint32_t reg;
10325 	int x;
10326 
10327 	reg = CSR_READ(sc, WMREG_EECD);
10328 
10329 	for (x = nbits; x > 0; x--) {
10330 		if (bits & (1U << (x - 1)))
10331 			reg |= EECD_DI;
10332 		else
10333 			reg &= ~EECD_DI;
10334 		CSR_WRITE(sc, WMREG_EECD, reg);
10335 		CSR_WRITE_FLUSH(sc);
10336 		delay(2);
10337 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10338 		CSR_WRITE_FLUSH(sc);
10339 		delay(2);
10340 		CSR_WRITE(sc, WMREG_EECD, reg);
10341 		CSR_WRITE_FLUSH(sc);
10342 		delay(2);
10343 	}
10344 }
10345 
10346 /*
10347  * wm_eeprom_recvbits:
10348  *
10349  *	Receive a series of bits from the EEPROM.
10350  */
10351 static void
10352 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
10353 {
10354 	uint32_t reg, val;
10355 	int x;
10356 
10357 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
10358 
10359 	val = 0;
10360 	for (x = nbits; x > 0; x--) {
10361 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
10362 		CSR_WRITE_FLUSH(sc);
10363 		delay(2);
10364 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
10365 			val |= (1U << (x - 1));
10366 		CSR_WRITE(sc, WMREG_EECD, reg);
10367 		CSR_WRITE_FLUSH(sc);
10368 		delay(2);
10369 	}
10370 	*valp = val;
10371 }
10372 
10373 /* Microwire */
10374 
10375 /*
10376  * wm_nvm_read_uwire:
10377  *
10378  *	Read a word from the EEPROM using the MicroWire protocol.
10379  */
10380 static int
10381 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10382 {
10383 	uint32_t reg, val;
10384 	int i;
10385 
10386 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10387 		device_xname(sc->sc_dev), __func__));
10388 
10389 	for (i = 0; i < wordcnt; i++) {
10390 		/* Clear SK and DI. */
10391 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
10392 		CSR_WRITE(sc, WMREG_EECD, reg);
10393 
10394 		/*
10395 		 * XXX: workaround for a bug in qemu-0.12.x and prior
10396 		 * and Xen.
10397 		 *
10398 		 * We use this workaround only for 82540 because qemu's
10399 		 * e1000 act as 82540.
10400 		 */
10401 		if (sc->sc_type == WM_T_82540) {
10402 			reg |= EECD_SK;
10403 			CSR_WRITE(sc, WMREG_EECD, reg);
10404 			reg &= ~EECD_SK;
10405 			CSR_WRITE(sc, WMREG_EECD, reg);
10406 			CSR_WRITE_FLUSH(sc);
10407 			delay(2);
10408 		}
10409 		/* XXX: end of workaround */
10410 
10411 		/* Set CHIP SELECT. */
10412 		reg |= EECD_CS;
10413 		CSR_WRITE(sc, WMREG_EECD, reg);
10414 		CSR_WRITE_FLUSH(sc);
10415 		delay(2);
10416 
10417 		/* Shift in the READ command. */
10418 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
10419 
10420 		/* Shift in address. */
10421 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
10422 
10423 		/* Shift out the data. */
10424 		wm_eeprom_recvbits(sc, &val, 16);
10425 		data[i] = val & 0xffff;
10426 
10427 		/* Clear CHIP SELECT. */
10428 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
10429 		CSR_WRITE(sc, WMREG_EECD, reg);
10430 		CSR_WRITE_FLUSH(sc);
10431 		delay(2);
10432 	}
10433 
10434 	return 0;
10435 }
10436 
10437 /* SPI */
10438 
10439 /*
10440  * Set SPI and FLASH related information from the EECD register.
10441  * For 82541 and 82547, the word size is taken from EEPROM.
10442  */
10443 static int
10444 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
10445 {
10446 	int size;
10447 	uint32_t reg;
10448 	uint16_t data;
10449 
10450 	reg = CSR_READ(sc, WMREG_EECD);
10451 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
10452 
10453 	/* Read the size of NVM from EECD by default */
10454 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10455 	switch (sc->sc_type) {
10456 	case WM_T_82541:
10457 	case WM_T_82541_2:
10458 	case WM_T_82547:
10459 	case WM_T_82547_2:
10460 		/* Set dummy value to access EEPROM */
10461 		sc->sc_nvm_wordsize = 64;
10462 		wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data);
10463 		reg = data;
10464 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
10465 		if (size == 0)
10466 			size = 6; /* 64 word size */
10467 		else
10468 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
10469 		break;
10470 	case WM_T_80003:
10471 	case WM_T_82571:
10472 	case WM_T_82572:
10473 	case WM_T_82573: /* SPI case */
10474 	case WM_T_82574: /* SPI case */
10475 	case WM_T_82583: /* SPI case */
10476 		size += NVM_WORD_SIZE_BASE_SHIFT;
10477 		if (size > 14)
10478 			size = 14;
10479 		break;
10480 	case WM_T_82575:
10481 	case WM_T_82576:
10482 	case WM_T_82580:
10483 	case WM_T_I350:
10484 	case WM_T_I354:
10485 	case WM_T_I210:
10486 	case WM_T_I211:
10487 		size += NVM_WORD_SIZE_BASE_SHIFT;
10488 		if (size > 15)
10489 			size = 15;
10490 		break;
10491 	default:
10492 		aprint_error_dev(sc->sc_dev,
10493 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
10494 		return -1;
10495 		break;
10496 	}
10497 
10498 	sc->sc_nvm_wordsize = 1 << size;
10499 
10500 	return 0;
10501 }
10502 
10503 /*
10504  * wm_nvm_ready_spi:
10505  *
10506  *	Wait for a SPI EEPROM to be ready for commands.
10507  */
10508 static int
10509 wm_nvm_ready_spi(struct wm_softc *sc)
10510 {
10511 	uint32_t val;
10512 	int usec;
10513 
10514 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10515 		device_xname(sc->sc_dev), __func__));
10516 
10517 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
10518 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
10519 		wm_eeprom_recvbits(sc, &val, 8);
10520 		if ((val & SPI_SR_RDY) == 0)
10521 			break;
10522 	}
10523 	if (usec >= SPI_MAX_RETRIES) {
10524 		aprint_error_dev(sc->sc_dev,"EEPROM failed to become ready\n");
10525 		return 1;
10526 	}
10527 	return 0;
10528 }
10529 
10530 /*
10531  * wm_nvm_read_spi:
10532  *
10533  *	Read a work from the EEPROM using the SPI protocol.
10534  */
10535 static int
10536 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
10537 {
10538 	uint32_t reg, val;
10539 	int i;
10540 	uint8_t opc;
10541 
10542 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10543 		device_xname(sc->sc_dev), __func__));
10544 
10545 	/* Clear SK and CS. */
10546 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
10547 	CSR_WRITE(sc, WMREG_EECD, reg);
10548 	CSR_WRITE_FLUSH(sc);
10549 	delay(2);
10550 
10551 	if (wm_nvm_ready_spi(sc))
10552 		return 1;
10553 
10554 	/* Toggle CS to flush commands. */
10555 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
10556 	CSR_WRITE_FLUSH(sc);
10557 	delay(2);
10558 	CSR_WRITE(sc, WMREG_EECD, reg);
10559 	CSR_WRITE_FLUSH(sc);
10560 	delay(2);
10561 
10562 	opc = SPI_OPC_READ;
10563 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
10564 		opc |= SPI_OPC_A8;
10565 
10566 	wm_eeprom_sendbits(sc, opc, 8);
10567 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
10568 
10569 	for (i = 0; i < wordcnt; i++) {
10570 		wm_eeprom_recvbits(sc, &val, 16);
10571 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
10572 	}
10573 
10574 	/* Raise CS and clear SK. */
10575 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
10576 	CSR_WRITE(sc, WMREG_EECD, reg);
10577 	CSR_WRITE_FLUSH(sc);
10578 	delay(2);
10579 
10580 	return 0;
10581 }
10582 
10583 /* Using with EERD */
10584 
10585 static int
10586 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
10587 {
10588 	uint32_t attempts = 100000;
10589 	uint32_t i, reg = 0;
10590 	int32_t done = -1;
10591 
10592 	for (i = 0; i < attempts; i++) {
10593 		reg = CSR_READ(sc, rw);
10594 
10595 		if (reg & EERD_DONE) {
10596 			done = 0;
10597 			break;
10598 		}
10599 		delay(5);
10600 	}
10601 
10602 	return done;
10603 }
10604 
10605 static int
10606 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt,
10607     uint16_t *data)
10608 {
10609 	int i, eerd = 0;
10610 	int error = 0;
10611 
10612 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10613 		device_xname(sc->sc_dev), __func__));
10614 
10615 	for (i = 0; i < wordcnt; i++) {
10616 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
10617 
10618 		CSR_WRITE(sc, WMREG_EERD, eerd);
10619 		error = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
10620 		if (error != 0)
10621 			break;
10622 
10623 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
10624 	}
10625 
10626 	return error;
10627 }
10628 
10629 /* Flash */
10630 
10631 static int
10632 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
10633 {
10634 	uint32_t eecd;
10635 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
10636 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
10637 	uint8_t sig_byte = 0;
10638 
10639 	switch (sc->sc_type) {
10640 	case WM_T_PCH_SPT:
10641 		/*
10642 		 * In SPT, read from the CTRL_EXT reg instead of accessing the
10643 		 * sector valid bits from the NVM.
10644 		 */
10645 		*bank = CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_NVMVS;
10646 		if ((*bank == 0) || (*bank == 1)) {
10647 			aprint_error_dev(sc->sc_dev,
10648 			    "%s: no valid NVM bank present (%u)\n", __func__,
10649 				*bank);
10650 			return -1;
10651 		} else {
10652 			*bank = *bank - 2;
10653 			return 0;
10654 		}
10655 	case WM_T_ICH8:
10656 	case WM_T_ICH9:
10657 		eecd = CSR_READ(sc, WMREG_EECD);
10658 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
10659 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
10660 			return 0;
10661 		}
10662 		/* FALLTHROUGH */
10663 	default:
10664 		/* Default to 0 */
10665 		*bank = 0;
10666 
10667 		/* Check bank 0 */
10668 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
10669 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10670 			*bank = 0;
10671 			return 0;
10672 		}
10673 
10674 		/* Check bank 1 */
10675 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
10676 		    &sig_byte);
10677 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
10678 			*bank = 1;
10679 			return 0;
10680 		}
10681 	}
10682 
10683 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
10684 		device_xname(sc->sc_dev)));
10685 	return -1;
10686 }
10687 
10688 /******************************************************************************
10689  * This function does initial flash setup so that a new read/write/erase cycle
10690  * can be started.
10691  *
10692  * sc - The pointer to the hw structure
10693  ****************************************************************************/
10694 static int32_t
10695 wm_ich8_cycle_init(struct wm_softc *sc)
10696 {
10697 	uint16_t hsfsts;
10698 	int32_t error = 1;
10699 	int32_t i     = 0;
10700 
10701 	hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10702 
10703 	/* May be check the Flash Des Valid bit in Hw status */
10704 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
10705 		return error;
10706 	}
10707 
10708 	/* Clear FCERR in Hw status by writing 1 */
10709 	/* Clear DAEL in Hw status by writing a 1 */
10710 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
10711 
10712 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10713 
10714 	/*
10715 	 * Either we should have a hardware SPI cycle in progress bit to check
10716 	 * against, in order to start a new cycle or FDONE bit should be
10717 	 * changed in the hardware so that it is 1 after harware reset, which
10718 	 * can then be used as an indication whether a cycle is in progress or
10719 	 * has been completed .. we should also have some software semaphore
10720 	 * mechanism to guard FDONE or the cycle in progress bit so that two
10721 	 * threads access to those bits can be sequentiallized or a way so that
10722 	 * 2 threads dont start the cycle at the same time
10723 	 */
10724 
10725 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10726 		/*
10727 		 * There is no cycle running at present, so we can start a
10728 		 * cycle
10729 		 */
10730 
10731 		/* Begin by setting Flash Cycle Done. */
10732 		hsfsts |= HSFSTS_DONE;
10733 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10734 		error = 0;
10735 	} else {
10736 		/*
10737 		 * otherwise poll for sometime so the current cycle has a
10738 		 * chance to end before giving up.
10739 		 */
10740 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
10741 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10742 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
10743 				error = 0;
10744 				break;
10745 			}
10746 			delay(1);
10747 		}
10748 		if (error == 0) {
10749 			/*
10750 			 * Successful in waiting for previous cycle to timeout,
10751 			 * now set the Flash Cycle Done.
10752 			 */
10753 			hsfsts |= HSFSTS_DONE;
10754 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
10755 		}
10756 	}
10757 	return error;
10758 }
10759 
10760 /******************************************************************************
10761  * This function starts a flash cycle and waits for its completion
10762  *
10763  * sc - The pointer to the hw structure
10764  ****************************************************************************/
10765 static int32_t
10766 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
10767 {
10768 	uint16_t hsflctl;
10769 	uint16_t hsfsts;
10770 	int32_t error = 1;
10771 	uint32_t i = 0;
10772 
10773 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
10774 	hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10775 	hsflctl |= HSFCTL_GO;
10776 	ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10777 
10778 	/* Wait till FDONE bit is set to 1 */
10779 	do {
10780 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10781 		if (hsfsts & HSFSTS_DONE)
10782 			break;
10783 		delay(1);
10784 		i++;
10785 	} while (i < timeout);
10786 	if ((hsfsts & HSFSTS_DONE) == 1 && (hsfsts & HSFSTS_ERR) == 0)
10787 		error = 0;
10788 
10789 	return error;
10790 }
10791 
10792 /******************************************************************************
10793  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
10794  *
10795  * sc - The pointer to the hw structure
10796  * index - The index of the byte or word to read.
10797  * size - Size of data to read, 1=byte 2=word, 4=dword
10798  * data - Pointer to the word to store the value read.
10799  *****************************************************************************/
10800 static int32_t
10801 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
10802     uint32_t size, uint32_t *data)
10803 {
10804 	uint16_t hsfsts;
10805 	uint16_t hsflctl;
10806 	uint32_t flash_linear_address;
10807 	uint32_t flash_data = 0;
10808 	int32_t error = 1;
10809 	int32_t count = 0;
10810 
10811 	if (size < 1  || size > 4 || data == 0x0 ||
10812 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
10813 		return error;
10814 
10815 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
10816 	    sc->sc_ich8_flash_base;
10817 
10818 	do {
10819 		delay(1);
10820 		/* Steps */
10821 		error = wm_ich8_cycle_init(sc);
10822 		if (error)
10823 			break;
10824 
10825 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
10826 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
10827 		hsflctl |=  ((size - 1) << HSFCTL_BCOUNT_SHIFT)
10828 		    & HSFCTL_BCOUNT_MASK;
10829 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
10830 		if (sc->sc_type == WM_T_PCH_SPT) {
10831 			/*
10832 			 * In SPT, This register is in Lan memory space, not
10833 			 * flash. Therefore, only 32 bit access is supported.
10834 			 */
10835 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFCTL,
10836 			    (uint32_t)hsflctl);
10837 		} else
10838 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
10839 
10840 		/*
10841 		 * Write the last 24 bits of index into Flash Linear address
10842 		 * field in Flash Address
10843 		 */
10844 		/* TODO: TBD maybe check the index against the size of flash */
10845 
10846 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
10847 
10848 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
10849 
10850 		/*
10851 		 * Check if FCERR is set to 1, if set to 1, clear it and try
10852 		 * the whole sequence a few more times, else read in (shift in)
10853 		 * the Flash Data0, the order is least significant byte first
10854 		 * msb to lsb
10855 		 */
10856 		if (error == 0) {
10857 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
10858 			if (size == 1)
10859 				*data = (uint8_t)(flash_data & 0x000000FF);
10860 			else if (size == 2)
10861 				*data = (uint16_t)(flash_data & 0x0000FFFF);
10862 			else if (size == 4)
10863 				*data = (uint32_t)flash_data;
10864 			break;
10865 		} else {
10866 			/*
10867 			 * If we've gotten here, then things are probably
10868 			 * completely hosed, but if the error condition is
10869 			 * detected, it won't hurt to give it another try...
10870 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
10871 			 */
10872 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
10873 			if (hsfsts & HSFSTS_ERR) {
10874 				/* Repeat for some time before giving up. */
10875 				continue;
10876 			} else if ((hsfsts & HSFSTS_DONE) == 0)
10877 				break;
10878 		}
10879 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
10880 
10881 	return error;
10882 }
10883 
10884 /******************************************************************************
10885  * Reads a single byte from the NVM using the ICH8 flash access registers.
10886  *
10887  * sc - pointer to wm_hw structure
10888  * index - The index of the byte to read.
10889  * data - Pointer to a byte to store the value read.
10890  *****************************************************************************/
10891 static int32_t
10892 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
10893 {
10894 	int32_t status;
10895 	uint32_t word = 0;
10896 
10897 	status = wm_read_ich8_data(sc, index, 1, &word);
10898 	if (status == 0)
10899 		*data = (uint8_t)word;
10900 	else
10901 		*data = 0;
10902 
10903 	return status;
10904 }
10905 
10906 /******************************************************************************
10907  * Reads a word from the NVM using the ICH8 flash access registers.
10908  *
10909  * sc - pointer to wm_hw structure
10910  * index - The starting byte index of the word to read.
10911  * data - Pointer to a word to store the value read.
10912  *****************************************************************************/
10913 static int32_t
10914 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
10915 {
10916 	int32_t status;
10917 	uint32_t word = 0;
10918 
10919 	status = wm_read_ich8_data(sc, index, 2, &word);
10920 	if (status == 0)
10921 		*data = (uint16_t)word;
10922 	else
10923 		*data = 0;
10924 
10925 	return status;
10926 }
10927 
10928 /******************************************************************************
10929  * Reads a dword from the NVM using the ICH8 flash access registers.
10930  *
10931  * sc - pointer to wm_hw structure
10932  * index - The starting byte index of the word to read.
10933  * data - Pointer to a word to store the value read.
10934  *****************************************************************************/
10935 static int32_t
10936 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
10937 {
10938 	int32_t status;
10939 
10940 	status = wm_read_ich8_data(sc, index, 4, data);
10941 	return status;
10942 }
10943 
10944 /******************************************************************************
10945  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
10946  * register.
10947  *
10948  * sc - Struct containing variables accessed by shared code
10949  * offset - offset of word in the EEPROM to read
10950  * data - word read from the EEPROM
10951  * words - number of words to read
10952  *****************************************************************************/
10953 static int
10954 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
10955 {
10956 	int32_t  error = 0;
10957 	uint32_t flash_bank = 0;
10958 	uint32_t act_offset = 0;
10959 	uint32_t bank_offset = 0;
10960 	uint16_t word = 0;
10961 	uint16_t i = 0;
10962 
10963 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
10964 		device_xname(sc->sc_dev), __func__));
10965 
10966 	/*
10967 	 * We need to know which is the valid flash bank.  In the event
10968 	 * that we didn't allocate eeprom_shadow_ram, we may not be
10969 	 * managing flash_bank.  So it cannot be trusted and needs
10970 	 * to be updated with each read.
10971 	 */
10972 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
10973 	if (error) {
10974 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
10975 			device_xname(sc->sc_dev)));
10976 		flash_bank = 0;
10977 	}
10978 
10979 	/*
10980 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
10981 	 * size
10982 	 */
10983 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
10984 
10985 	error = wm_get_swfwhw_semaphore(sc);
10986 	if (error) {
10987 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
10988 		    __func__);
10989 		return error;
10990 	}
10991 
10992 	for (i = 0; i < words; i++) {
10993 		/* The NVM part needs a byte offset, hence * 2 */
10994 		act_offset = bank_offset + ((offset + i) * 2);
10995 		error = wm_read_ich8_word(sc, act_offset, &word);
10996 		if (error) {
10997 			aprint_error_dev(sc->sc_dev,
10998 			    "%s: failed to read NVM\n", __func__);
10999 			break;
11000 		}
11001 		data[i] = word;
11002 	}
11003 
11004 	wm_put_swfwhw_semaphore(sc);
11005 	return error;
11006 }
11007 
11008 /******************************************************************************
11009  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
11010  * register.
11011  *
11012  * sc - Struct containing variables accessed by shared code
11013  * offset - offset of word in the EEPROM to read
11014  * data - word read from the EEPROM
11015  * words - number of words to read
11016  *****************************************************************************/
11017 static int
11018 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
11019 {
11020 	int32_t  error = 0;
11021 	uint32_t flash_bank = 0;
11022 	uint32_t act_offset = 0;
11023 	uint32_t bank_offset = 0;
11024 	uint32_t dword = 0;
11025 	uint16_t i = 0;
11026 
11027 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11028 		device_xname(sc->sc_dev), __func__));
11029 
11030 	/*
11031 	 * We need to know which is the valid flash bank.  In the event
11032 	 * that we didn't allocate eeprom_shadow_ram, we may not be
11033 	 * managing flash_bank.  So it cannot be trusted and needs
11034 	 * to be updated with each read.
11035 	 */
11036 	error = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
11037 	if (error) {
11038 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
11039 			device_xname(sc->sc_dev)));
11040 		flash_bank = 0;
11041 	}
11042 
11043 	/*
11044 	 * Adjust offset appropriately if we're on bank 1 - adjust for word
11045 	 * size
11046 	 */
11047 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
11048 
11049 	error = wm_get_swfwhw_semaphore(sc);
11050 	if (error) {
11051 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11052 		    __func__);
11053 		return error;
11054 	}
11055 
11056 	for (i = 0; i < words; i++) {
11057 		/* The NVM part needs a byte offset, hence * 2 */
11058 		act_offset = bank_offset + ((offset + i) * 2);
11059 		/* but we must read dword aligned, so mask ... */
11060 		error = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
11061 		if (error) {
11062 			aprint_error_dev(sc->sc_dev,
11063 			    "%s: failed to read NVM\n", __func__);
11064 			break;
11065 		}
11066 		/* ... and pick out low or high word */
11067 		if ((act_offset & 0x2) == 0)
11068 			data[i] = (uint16_t)(dword & 0xFFFF);
11069 		else
11070 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
11071 	}
11072 
11073 	wm_put_swfwhw_semaphore(sc);
11074 	return error;
11075 }
11076 
11077 /* iNVM */
11078 
11079 static int
11080 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
11081 {
11082 	int32_t  rv = 0;
11083 	uint32_t invm_dword;
11084 	uint16_t i;
11085 	uint8_t record_type, word_address;
11086 
11087 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11088 		device_xname(sc->sc_dev), __func__));
11089 
11090 	for (i = 0; i < INVM_SIZE; i++) {
11091 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
11092 		/* Get record type */
11093 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
11094 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
11095 			break;
11096 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
11097 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
11098 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
11099 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
11100 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
11101 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
11102 			if (word_address == address) {
11103 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
11104 				rv = 0;
11105 				break;
11106 			}
11107 		}
11108 	}
11109 
11110 	return rv;
11111 }
11112 
11113 static int
11114 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
11115 {
11116 	int rv = 0;
11117 	int i;
11118 
11119 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11120 		device_xname(sc->sc_dev), __func__));
11121 
11122 	for (i = 0; i < words; i++) {
11123 		switch (offset + i) {
11124 		case NVM_OFF_MACADDR:
11125 		case NVM_OFF_MACADDR1:
11126 		case NVM_OFF_MACADDR2:
11127 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
11128 			if (rv != 0) {
11129 				data[i] = 0xffff;
11130 				rv = -1;
11131 			}
11132 			break;
11133 		case NVM_OFF_CFG2:
11134 			rv = wm_nvm_read_word_invm(sc, offset, data);
11135 			if (rv != 0) {
11136 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
11137 				rv = 0;
11138 			}
11139 			break;
11140 		case NVM_OFF_CFG4:
11141 			rv = wm_nvm_read_word_invm(sc, offset, data);
11142 			if (rv != 0) {
11143 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
11144 				rv = 0;
11145 			}
11146 			break;
11147 		case NVM_OFF_LED_1_CFG:
11148 			rv = wm_nvm_read_word_invm(sc, offset, data);
11149 			if (rv != 0) {
11150 				*data = NVM_LED_1_CFG_DEFAULT_I211;
11151 				rv = 0;
11152 			}
11153 			break;
11154 		case NVM_OFF_LED_0_2_CFG:
11155 			rv = wm_nvm_read_word_invm(sc, offset, data);
11156 			if (rv != 0) {
11157 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
11158 				rv = 0;
11159 			}
11160 			break;
11161 		case NVM_OFF_ID_LED_SETTINGS:
11162 			rv = wm_nvm_read_word_invm(sc, offset, data);
11163 			if (rv != 0) {
11164 				*data = ID_LED_RESERVED_FFFF;
11165 				rv = 0;
11166 			}
11167 			break;
11168 		default:
11169 			DPRINTF(WM_DEBUG_NVM,
11170 			    ("NVM word 0x%02x is not mapped.\n", offset));
11171 			*data = NVM_RESERVED_WORD;
11172 			break;
11173 		}
11174 	}
11175 
11176 	return rv;
11177 }
11178 
11179 /* Lock, detecting NVM type, validate checksum, version and read */
11180 
11181 /*
11182  * wm_nvm_acquire:
11183  *
11184  *	Perform the EEPROM handshake required on some chips.
11185  */
11186 static int
11187 wm_nvm_acquire(struct wm_softc *sc)
11188 {
11189 	uint32_t reg;
11190 	int x;
11191 	int ret = 0;
11192 
11193 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11194 		device_xname(sc->sc_dev), __func__));
11195 
11196 	if (sc->sc_type >= WM_T_ICH8) {
11197 		ret = wm_get_nvm_ich8lan(sc);
11198 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF) {
11199 		ret = wm_get_swfwhw_semaphore(sc);
11200 	} else if (sc->sc_flags & WM_F_LOCK_SWFW) {
11201 		/* This will also do wm_get_swsm_semaphore() if needed */
11202 		ret = wm_get_swfw_semaphore(sc, SWFW_EEP_SM);
11203 	} else if (sc->sc_flags & WM_F_LOCK_SWSM) {
11204 		ret = wm_get_swsm_semaphore(sc);
11205 	}
11206 
11207 	if (ret) {
11208 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
11209 			__func__);
11210 		return 1;
11211 	}
11212 
11213 	if (sc->sc_flags & WM_F_LOCK_EECD) {
11214 		reg = CSR_READ(sc, WMREG_EECD);
11215 
11216 		/* Request EEPROM access. */
11217 		reg |= EECD_EE_REQ;
11218 		CSR_WRITE(sc, WMREG_EECD, reg);
11219 
11220 		/* ..and wait for it to be granted. */
11221 		for (x = 0; x < 1000; x++) {
11222 			reg = CSR_READ(sc, WMREG_EECD);
11223 			if (reg & EECD_EE_GNT)
11224 				break;
11225 			delay(5);
11226 		}
11227 		if ((reg & EECD_EE_GNT) == 0) {
11228 			aprint_error_dev(sc->sc_dev,
11229 			    "could not acquire EEPROM GNT\n");
11230 			reg &= ~EECD_EE_REQ;
11231 			CSR_WRITE(sc, WMREG_EECD, reg);
11232 			if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11233 				wm_put_swfwhw_semaphore(sc);
11234 			if (sc->sc_flags & WM_F_LOCK_SWFW)
11235 				wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11236 			else if (sc->sc_flags & WM_F_LOCK_SWSM)
11237 				wm_put_swsm_semaphore(sc);
11238 			return 1;
11239 		}
11240 	}
11241 
11242 	return 0;
11243 }
11244 
11245 /*
11246  * wm_nvm_release:
11247  *
11248  *	Release the EEPROM mutex.
11249  */
11250 static void
11251 wm_nvm_release(struct wm_softc *sc)
11252 {
11253 	uint32_t reg;
11254 
11255 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11256 		device_xname(sc->sc_dev), __func__));
11257 
11258 	if (sc->sc_flags & WM_F_LOCK_EECD) {
11259 		reg = CSR_READ(sc, WMREG_EECD);
11260 		reg &= ~EECD_EE_REQ;
11261 		CSR_WRITE(sc, WMREG_EECD, reg);
11262 	}
11263 
11264 	if (sc->sc_type >= WM_T_ICH8) {
11265 		wm_put_nvm_ich8lan(sc);
11266 	} else if (sc->sc_flags & WM_F_LOCK_EXTCNF)
11267 		wm_put_swfwhw_semaphore(sc);
11268 	if (sc->sc_flags & WM_F_LOCK_SWFW)
11269 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
11270 	else if (sc->sc_flags & WM_F_LOCK_SWSM)
11271 		wm_put_swsm_semaphore(sc);
11272 }
11273 
11274 static int
11275 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
11276 {
11277 	uint32_t eecd = 0;
11278 
11279 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
11280 	    || sc->sc_type == WM_T_82583) {
11281 		eecd = CSR_READ(sc, WMREG_EECD);
11282 
11283 		/* Isolate bits 15 & 16 */
11284 		eecd = ((eecd >> 15) & 0x03);
11285 
11286 		/* If both bits are set, device is Flash type */
11287 		if (eecd == 0x03)
11288 			return 0;
11289 	}
11290 	return 1;
11291 }
11292 
11293 static int
11294 wm_nvm_get_flash_presence_i210(struct wm_softc *sc)
11295 {
11296 	uint32_t eec;
11297 
11298 	eec = CSR_READ(sc, WMREG_EEC);
11299 	if ((eec & EEC_FLASH_DETECTED) != 0)
11300 		return 1;
11301 
11302 	return 0;
11303 }
11304 
11305 /*
11306  * wm_nvm_validate_checksum
11307  *
11308  * The checksum is defined as the sum of the first 64 (16 bit) words.
11309  */
11310 static int
11311 wm_nvm_validate_checksum(struct wm_softc *sc)
11312 {
11313 	uint16_t checksum;
11314 	uint16_t eeprom_data;
11315 #ifdef WM_DEBUG
11316 	uint16_t csum_wordaddr, valid_checksum;
11317 #endif
11318 	int i;
11319 
11320 	checksum = 0;
11321 
11322 	/* Don't check for I211 */
11323 	if (sc->sc_type == WM_T_I211)
11324 		return 0;
11325 
11326 #ifdef WM_DEBUG
11327 	if (sc->sc_type == WM_T_PCH_LPT) {
11328 		csum_wordaddr = NVM_OFF_COMPAT;
11329 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
11330 	} else {
11331 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
11332 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
11333 	}
11334 
11335 	/* Dump EEPROM image for debug */
11336 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11337 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11338 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
11339 		/* XXX PCH_SPT? */
11340 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
11341 		if ((eeprom_data & valid_checksum) == 0) {
11342 			DPRINTF(WM_DEBUG_NVM,
11343 			    ("%s: NVM need to be updated (%04x != %04x)\n",
11344 				device_xname(sc->sc_dev), eeprom_data,
11345 				    valid_checksum));
11346 		}
11347 	}
11348 
11349 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
11350 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
11351 		for (i = 0; i < NVM_SIZE; i++) {
11352 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
11353 				printf("XXXX ");
11354 			else
11355 				printf("%04hx ", eeprom_data);
11356 			if (i % 8 == 7)
11357 				printf("\n");
11358 		}
11359 	}
11360 
11361 #endif /* WM_DEBUG */
11362 
11363 	for (i = 0; i < NVM_SIZE; i++) {
11364 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
11365 			return 1;
11366 		checksum += eeprom_data;
11367 	}
11368 
11369 	if (checksum != (uint16_t) NVM_CHECKSUM) {
11370 #ifdef WM_DEBUG
11371 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
11372 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
11373 #endif
11374 	}
11375 
11376 	return 0;
11377 }
11378 
11379 static void
11380 wm_nvm_version_invm(struct wm_softc *sc)
11381 {
11382 	uint32_t dword;
11383 
11384 	/*
11385 	 * Linux's code to decode version is very strange, so we don't
11386 	 * obey that algorithm and just use word 61 as the document.
11387 	 * Perhaps it's not perfect though...
11388 	 *
11389 	 * Example:
11390 	 *
11391 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
11392 	 */
11393 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
11394 	dword = __SHIFTOUT(dword, INVM_VER_1);
11395 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
11396 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
11397 }
11398 
11399 static void
11400 wm_nvm_version(struct wm_softc *sc)
11401 {
11402 	uint16_t major, minor, build, patch;
11403 	uint16_t uid0, uid1;
11404 	uint16_t nvm_data;
11405 	uint16_t off;
11406 	bool check_version = false;
11407 	bool check_optionrom = false;
11408 	bool have_build = false;
11409 
11410 	/*
11411 	 * Version format:
11412 	 *
11413 	 * XYYZ
11414 	 * X0YZ
11415 	 * X0YY
11416 	 *
11417 	 * Example:
11418 	 *
11419 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
11420 	 *	82571	0x50a6	5.10.6?
11421 	 *	82572	0x506a	5.6.10?
11422 	 *	82572EI	0x5069	5.6.9?
11423 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
11424 	 *		0x2013	2.1.3?
11425 	 *	82583	0x10a0	1.10.0? (document says it's default vaule)
11426 	 */
11427 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1);
11428 	switch (sc->sc_type) {
11429 	case WM_T_82571:
11430 	case WM_T_82572:
11431 	case WM_T_82574:
11432 	case WM_T_82583:
11433 		check_version = true;
11434 		check_optionrom = true;
11435 		have_build = true;
11436 		break;
11437 	case WM_T_82575:
11438 	case WM_T_82576:
11439 	case WM_T_82580:
11440 		if ((uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
11441 			check_version = true;
11442 		break;
11443 	case WM_T_I211:
11444 		wm_nvm_version_invm(sc);
11445 		goto printver;
11446 	case WM_T_I210:
11447 		if (!wm_nvm_get_flash_presence_i210(sc)) {
11448 			wm_nvm_version_invm(sc);
11449 			goto printver;
11450 		}
11451 		/* FALLTHROUGH */
11452 	case WM_T_I350:
11453 	case WM_T_I354:
11454 		check_version = true;
11455 		check_optionrom = true;
11456 		break;
11457 	default:
11458 		return;
11459 	}
11460 	if (check_version) {
11461 		wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data);
11462 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
11463 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
11464 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
11465 			build = nvm_data & NVM_BUILD_MASK;
11466 			have_build = true;
11467 		} else
11468 			minor = nvm_data & 0x00ff;
11469 
11470 		/* Decimal */
11471 		minor = (minor / 16) * 10 + (minor % 16);
11472 		sc->sc_nvm_ver_major = major;
11473 		sc->sc_nvm_ver_minor = minor;
11474 
11475 printver:
11476 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
11477 		    sc->sc_nvm_ver_minor);
11478 		if (have_build) {
11479 			sc->sc_nvm_ver_build = build;
11480 			aprint_verbose(".%d", build);
11481 		}
11482 	}
11483 	if (check_optionrom) {
11484 		wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off);
11485 		/* Option ROM Version */
11486 		if ((off != 0x0000) && (off != 0xffff)) {
11487 			off += NVM_COMBO_VER_OFF;
11488 			wm_nvm_read(sc, off + 1, 1, &uid1);
11489 			wm_nvm_read(sc, off, 1, &uid0);
11490 			if ((uid0 != 0) && (uid0 != 0xffff)
11491 			    && (uid1 != 0) && (uid1 != 0xffff)) {
11492 				/* 16bits */
11493 				major = uid0 >> 8;
11494 				build = (uid0 << 8) | (uid1 >> 8);
11495 				patch = uid1 & 0x00ff;
11496 				aprint_verbose(", option ROM Version %d.%d.%d",
11497 				    major, build, patch);
11498 			}
11499 		}
11500 	}
11501 
11502 	wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0);
11503 	aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
11504 }
11505 
11506 /*
11507  * wm_nvm_read:
11508  *
11509  *	Read data from the serial EEPROM.
11510  */
11511 static int
11512 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11513 {
11514 	int rv;
11515 
11516 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11517 		device_xname(sc->sc_dev), __func__));
11518 
11519 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
11520 		return 1;
11521 
11522 	if (wm_nvm_acquire(sc))
11523 		return 1;
11524 
11525 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
11526 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
11527 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT))
11528 		rv = wm_nvm_read_ich8(sc, word, wordcnt, data);
11529 	else if (sc->sc_type == WM_T_PCH_SPT)
11530 		rv = wm_nvm_read_spt(sc, word, wordcnt, data);
11531 	else if (sc->sc_flags & WM_F_EEPROM_INVM)
11532 		rv = wm_nvm_read_invm(sc, word, wordcnt, data);
11533 	else if (sc->sc_flags & WM_F_EEPROM_EERDEEWR)
11534 		rv = wm_nvm_read_eerd(sc, word, wordcnt, data);
11535 	else if (sc->sc_flags & WM_F_EEPROM_SPI)
11536 		rv = wm_nvm_read_spi(sc, word, wordcnt, data);
11537 	else
11538 		rv = wm_nvm_read_uwire(sc, word, wordcnt, data);
11539 
11540 	wm_nvm_release(sc);
11541 	return rv;
11542 }
11543 
11544 /*
11545  * Hardware semaphores.
11546  * Very complexed...
11547  */
11548 
11549 static int
11550 wm_get_null(struct wm_softc *sc)
11551 {
11552 
11553 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11554 		device_xname(sc->sc_dev), __func__));
11555 	return 0;
11556 }
11557 
11558 static void
11559 wm_put_null(struct wm_softc *sc)
11560 {
11561 
11562 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11563 		device_xname(sc->sc_dev), __func__));
11564 	return;
11565 }
11566 
11567 /*
11568  * Get hardware semaphore.
11569  * Same as e1000_get_hw_semaphore_generic()
11570  */
11571 static int
11572 wm_get_swsm_semaphore(struct wm_softc *sc)
11573 {
11574 	int32_t timeout;
11575 	uint32_t swsm;
11576 
11577 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11578 		device_xname(sc->sc_dev), __func__));
11579 	KASSERT(sc->sc_nvm_wordsize > 0);
11580 
11581 	/* Get the SW semaphore. */
11582 	timeout = sc->sc_nvm_wordsize + 1;
11583 	while (timeout) {
11584 		swsm = CSR_READ(sc, WMREG_SWSM);
11585 
11586 		if ((swsm & SWSM_SMBI) == 0)
11587 			break;
11588 
11589 		delay(50);
11590 		timeout--;
11591 	}
11592 
11593 	if (timeout == 0) {
11594 		aprint_error_dev(sc->sc_dev,
11595 		    "could not acquire SWSM SMBI\n");
11596 		return 1;
11597 	}
11598 
11599 	/* Get the FW semaphore. */
11600 	timeout = sc->sc_nvm_wordsize + 1;
11601 	while (timeout) {
11602 		swsm = CSR_READ(sc, WMREG_SWSM);
11603 		swsm |= SWSM_SWESMBI;
11604 		CSR_WRITE(sc, WMREG_SWSM, swsm);
11605 		/* If we managed to set the bit we got the semaphore. */
11606 		swsm = CSR_READ(sc, WMREG_SWSM);
11607 		if (swsm & SWSM_SWESMBI)
11608 			break;
11609 
11610 		delay(50);
11611 		timeout--;
11612 	}
11613 
11614 	if (timeout == 0) {
11615 		aprint_error_dev(sc->sc_dev,
11616 		    "could not acquire SWSM SWESMBI\n");
11617 		/* Release semaphores */
11618 		wm_put_swsm_semaphore(sc);
11619 		return 1;
11620 	}
11621 	return 0;
11622 }
11623 
11624 /*
11625  * Put hardware semaphore.
11626  * Same as e1000_put_hw_semaphore_generic()
11627  */
11628 static void
11629 wm_put_swsm_semaphore(struct wm_softc *sc)
11630 {
11631 	uint32_t swsm;
11632 
11633 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11634 		device_xname(sc->sc_dev), __func__));
11635 
11636 	swsm = CSR_READ(sc, WMREG_SWSM);
11637 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
11638 	CSR_WRITE(sc, WMREG_SWSM, swsm);
11639 }
11640 
11641 /*
11642  * Get SW/FW semaphore.
11643  * Same as e1000_acquire_swfw_sync_82575().
11644  */
11645 static int
11646 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11647 {
11648 	uint32_t swfw_sync;
11649 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
11650 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
11651 	int timeout = 200;
11652 
11653 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11654 		device_xname(sc->sc_dev), __func__));
11655 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11656 
11657 	for (timeout = 0; timeout < 200; timeout++) {
11658 		if (sc->sc_flags & WM_F_LOCK_SWSM) {
11659 			if (wm_get_swsm_semaphore(sc)) {
11660 				aprint_error_dev(sc->sc_dev,
11661 				    "%s: failed to get semaphore\n",
11662 				    __func__);
11663 				return 1;
11664 			}
11665 		}
11666 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11667 		if ((swfw_sync & (swmask | fwmask)) == 0) {
11668 			swfw_sync |= swmask;
11669 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11670 			if (sc->sc_flags & WM_F_LOCK_SWSM)
11671 				wm_put_swsm_semaphore(sc);
11672 			return 0;
11673 		}
11674 		if (sc->sc_flags & WM_F_LOCK_SWSM)
11675 			wm_put_swsm_semaphore(sc);
11676 		delay(5000);
11677 	}
11678 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
11679 	    device_xname(sc->sc_dev), mask, swfw_sync);
11680 	return 1;
11681 }
11682 
11683 static void
11684 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
11685 {
11686 	uint32_t swfw_sync;
11687 
11688 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11689 		device_xname(sc->sc_dev), __func__));
11690 	KASSERT((sc->sc_flags & WM_F_LOCK_SWSM) != 0);
11691 
11692 	if (sc->sc_flags & WM_F_LOCK_SWSM) {
11693 		while (wm_get_swsm_semaphore(sc) != 0)
11694 			continue;
11695 	}
11696 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
11697 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
11698 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
11699 	if (sc->sc_flags & WM_F_LOCK_SWSM)
11700 		wm_put_swsm_semaphore(sc);
11701 }
11702 
11703 static int
11704 wm_get_phy_82575(struct wm_softc *sc)
11705 {
11706 
11707 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11708 		device_xname(sc->sc_dev), __func__));
11709 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11710 }
11711 
11712 static void
11713 wm_put_phy_82575(struct wm_softc *sc)
11714 {
11715 
11716 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11717 		device_xname(sc->sc_dev), __func__));
11718 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
11719 }
11720 
11721 static int
11722 wm_get_swfwhw_semaphore(struct wm_softc *sc)
11723 {
11724 	uint32_t ext_ctrl;
11725 	int timeout = 200;
11726 
11727 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11728 		device_xname(sc->sc_dev), __func__));
11729 
11730 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11731 	for (timeout = 0; timeout < 200; timeout++) {
11732 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11733 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11734 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11735 
11736 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11737 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11738 			return 0;
11739 		delay(5000);
11740 	}
11741 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
11742 	    device_xname(sc->sc_dev), ext_ctrl);
11743 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11744 	return 1;
11745 }
11746 
11747 static void
11748 wm_put_swfwhw_semaphore(struct wm_softc *sc)
11749 {
11750 	uint32_t ext_ctrl;
11751 
11752 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11753 		device_xname(sc->sc_dev), __func__));
11754 
11755 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11756 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11757 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11758 
11759 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
11760 }
11761 
11762 static int
11763 wm_get_swflag_ich8lan(struct wm_softc *sc)
11764 {
11765 	uint32_t ext_ctrl;
11766 	int timeout;
11767 
11768 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11769 		device_xname(sc->sc_dev), __func__));
11770 	mutex_enter(sc->sc_ich_phymtx);
11771 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
11772 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11773 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
11774 			break;
11775 		delay(1000);
11776 	}
11777 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
11778 		printf("%s: SW has already locked the resource\n",
11779 		    device_xname(sc->sc_dev));
11780 		goto out;
11781 	}
11782 
11783 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
11784 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11785 	for (timeout = 0; timeout < 1000; timeout++) {
11786 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11787 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
11788 			break;
11789 		delay(1000);
11790 	}
11791 	if (timeout >= 1000) {
11792 		printf("%s: failed to acquire semaphore\n",
11793 		    device_xname(sc->sc_dev));
11794 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11795 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11796 		goto out;
11797 	}
11798 	return 0;
11799 
11800 out:
11801 	mutex_exit(sc->sc_ich_phymtx);
11802 	return 1;
11803 }
11804 
11805 static void
11806 wm_put_swflag_ich8lan(struct wm_softc *sc)
11807 {
11808 	uint32_t ext_ctrl;
11809 
11810 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11811 		device_xname(sc->sc_dev), __func__));
11812 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
11813 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
11814 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11815 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
11816 	} else {
11817 		printf("%s: Semaphore unexpectedly released\n",
11818 		    device_xname(sc->sc_dev));
11819 	}
11820 
11821 	mutex_exit(sc->sc_ich_phymtx);
11822 }
11823 
11824 static int
11825 wm_get_nvm_ich8lan(struct wm_softc *sc)
11826 {
11827 
11828 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11829 		device_xname(sc->sc_dev), __func__));
11830 	mutex_enter(sc->sc_ich_nvmmtx);
11831 
11832 	return 0;
11833 }
11834 
11835 static void
11836 wm_put_nvm_ich8lan(struct wm_softc *sc)
11837 {
11838 
11839 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11840 		device_xname(sc->sc_dev), __func__));
11841 	mutex_exit(sc->sc_ich_nvmmtx);
11842 }
11843 
11844 static int
11845 wm_get_hw_semaphore_82573(struct wm_softc *sc)
11846 {
11847 	int i = 0;
11848 	uint32_t reg;
11849 
11850 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11851 		device_xname(sc->sc_dev), __func__));
11852 
11853 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11854 	do {
11855 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
11856 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
11857 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11858 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
11859 			break;
11860 		delay(2*1000);
11861 		i++;
11862 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
11863 
11864 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
11865 		wm_put_hw_semaphore_82573(sc);
11866 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
11867 		    device_xname(sc->sc_dev));
11868 		return -1;
11869 	}
11870 
11871 	return 0;
11872 }
11873 
11874 static void
11875 wm_put_hw_semaphore_82573(struct wm_softc *sc)
11876 {
11877 	uint32_t reg;
11878 
11879 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
11880 		device_xname(sc->sc_dev), __func__));
11881 
11882 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
11883 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
11884 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
11885 }
11886 
11887 /*
11888  * Management mode and power management related subroutines.
11889  * BMC, AMT, suspend/resume and EEE.
11890  */
11891 
11892 #ifdef WM_WOL
11893 static int
11894 wm_check_mng_mode(struct wm_softc *sc)
11895 {
11896 	int rv;
11897 
11898 	switch (sc->sc_type) {
11899 	case WM_T_ICH8:
11900 	case WM_T_ICH9:
11901 	case WM_T_ICH10:
11902 	case WM_T_PCH:
11903 	case WM_T_PCH2:
11904 	case WM_T_PCH_LPT:
11905 	case WM_T_PCH_SPT:
11906 		rv = wm_check_mng_mode_ich8lan(sc);
11907 		break;
11908 	case WM_T_82574:
11909 	case WM_T_82583:
11910 		rv = wm_check_mng_mode_82574(sc);
11911 		break;
11912 	case WM_T_82571:
11913 	case WM_T_82572:
11914 	case WM_T_82573:
11915 	case WM_T_80003:
11916 		rv = wm_check_mng_mode_generic(sc);
11917 		break;
11918 	default:
11919 		/* noting to do */
11920 		rv = 0;
11921 		break;
11922 	}
11923 
11924 	return rv;
11925 }
11926 
11927 static int
11928 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
11929 {
11930 	uint32_t fwsm;
11931 
11932 	fwsm = CSR_READ(sc, WMREG_FWSM);
11933 
11934 	if (((fwsm & FWSM_FW_VALID) != 0)
11935 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11936 		return 1;
11937 
11938 	return 0;
11939 }
11940 
11941 static int
11942 wm_check_mng_mode_82574(struct wm_softc *sc)
11943 {
11944 	uint16_t data;
11945 
11946 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11947 
11948 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
11949 		return 1;
11950 
11951 	return 0;
11952 }
11953 
11954 static int
11955 wm_check_mng_mode_generic(struct wm_softc *sc)
11956 {
11957 	uint32_t fwsm;
11958 
11959 	fwsm = CSR_READ(sc, WMREG_FWSM);
11960 
11961 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
11962 		return 1;
11963 
11964 	return 0;
11965 }
11966 #endif /* WM_WOL */
11967 
11968 static int
11969 wm_enable_mng_pass_thru(struct wm_softc *sc)
11970 {
11971 	uint32_t manc, fwsm, factps;
11972 
11973 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
11974 		return 0;
11975 
11976 	manc = CSR_READ(sc, WMREG_MANC);
11977 
11978 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
11979 		device_xname(sc->sc_dev), manc));
11980 	if ((manc & MANC_RECV_TCO_EN) == 0)
11981 		return 0;
11982 
11983 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
11984 		fwsm = CSR_READ(sc, WMREG_FWSM);
11985 		factps = CSR_READ(sc, WMREG_FACTPS);
11986 		if (((factps & FACTPS_MNGCG) == 0)
11987 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
11988 			return 1;
11989 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
11990 		uint16_t data;
11991 
11992 		factps = CSR_READ(sc, WMREG_FACTPS);
11993 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
11994 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
11995 			device_xname(sc->sc_dev), factps, data));
11996 		if (((factps & FACTPS_MNGCG) == 0)
11997 		    && ((data & NVM_CFG2_MNGM_MASK)
11998 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
11999 			return 1;
12000 	} else if (((manc & MANC_SMBUS_EN) != 0)
12001 	    && ((manc & MANC_ASF_EN) == 0))
12002 		return 1;
12003 
12004 	return 0;
12005 }
12006 
12007 static bool
12008 wm_phy_resetisblocked(struct wm_softc *sc)
12009 {
12010 	bool blocked = false;
12011 	uint32_t reg;
12012 	int i = 0;
12013 
12014 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12015 		device_xname(sc->sc_dev), __func__));
12016 
12017 	switch (sc->sc_type) {
12018 	case WM_T_ICH8:
12019 	case WM_T_ICH9:
12020 	case WM_T_ICH10:
12021 	case WM_T_PCH:
12022 	case WM_T_PCH2:
12023 	case WM_T_PCH_LPT:
12024 	case WM_T_PCH_SPT:
12025 		do {
12026 			reg = CSR_READ(sc, WMREG_FWSM);
12027 			if ((reg & FWSM_RSPCIPHY) == 0) {
12028 				blocked = true;
12029 				delay(10*1000);
12030 				continue;
12031 			}
12032 			blocked = false;
12033 		} while (blocked && (i++ < 30));
12034 		return blocked;
12035 		break;
12036 	case WM_T_82571:
12037 	case WM_T_82572:
12038 	case WM_T_82573:
12039 	case WM_T_82574:
12040 	case WM_T_82583:
12041 	case WM_T_80003:
12042 		reg = CSR_READ(sc, WMREG_MANC);
12043 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
12044 			return true;
12045 		else
12046 			return false;
12047 		break;
12048 	default:
12049 		/* no problem */
12050 		break;
12051 	}
12052 
12053 	return false;
12054 }
12055 
12056 static void
12057 wm_get_hw_control(struct wm_softc *sc)
12058 {
12059 	uint32_t reg;
12060 
12061 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12062 		device_xname(sc->sc_dev), __func__));
12063 
12064 	if (sc->sc_type == WM_T_82573) {
12065 		reg = CSR_READ(sc, WMREG_SWSM);
12066 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
12067 	} else if (sc->sc_type >= WM_T_82571) {
12068 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12069 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
12070 	}
12071 }
12072 
12073 static void
12074 wm_release_hw_control(struct wm_softc *sc)
12075 {
12076 	uint32_t reg;
12077 
12078 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12079 		device_xname(sc->sc_dev), __func__));
12080 
12081 	if (sc->sc_type == WM_T_82573) {
12082 		reg = CSR_READ(sc, WMREG_SWSM);
12083 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
12084 	} else if (sc->sc_type >= WM_T_82571) {
12085 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12086 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
12087 	}
12088 }
12089 
12090 static void
12091 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
12092 {
12093 	uint32_t reg;
12094 
12095 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12096 		device_xname(sc->sc_dev), __func__));
12097 
12098 	if (sc->sc_type < WM_T_PCH2)
12099 		return;
12100 
12101 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
12102 
12103 	if (gate)
12104 		reg |= EXTCNFCTR_GATE_PHY_CFG;
12105 	else
12106 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
12107 
12108 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
12109 }
12110 
12111 static void
12112 wm_smbustopci(struct wm_softc *sc)
12113 {
12114 	uint32_t fwsm, reg;
12115 	int rv = 0;
12116 
12117 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12118 		device_xname(sc->sc_dev), __func__));
12119 
12120 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
12121 	wm_gate_hw_phy_config_ich8lan(sc, true);
12122 
12123 	/* Disable ULP */
12124 	wm_ulp_disable(sc);
12125 
12126 	/* Acquire PHY semaphore */
12127 	sc->phy.acquire(sc);
12128 
12129 	fwsm = CSR_READ(sc, WMREG_FWSM);
12130 	switch (sc->sc_type) {
12131 	case WM_T_PCH_LPT:
12132 	case WM_T_PCH_SPT:
12133 		if (wm_phy_is_accessible_pchlan(sc))
12134 			break;
12135 
12136 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12137 		reg |= CTRL_EXT_FORCE_SMBUS;
12138 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12139 #if 0
12140 		/* XXX Isn't this required??? */
12141 		CSR_WRITE_FLUSH(sc);
12142 #endif
12143 		delay(50 * 1000);
12144 		/* FALLTHROUGH */
12145 	case WM_T_PCH2:
12146 		if (wm_phy_is_accessible_pchlan(sc) == true)
12147 			break;
12148 		/* FALLTHROUGH */
12149 	case WM_T_PCH:
12150 		if (sc->sc_type == WM_T_PCH)
12151 			if ((fwsm & FWSM_FW_VALID) != 0)
12152 				break;
12153 
12154 		if (wm_phy_resetisblocked(sc) == true) {
12155 			printf("XXX reset is blocked(3)\n");
12156 			break;
12157 		}
12158 
12159 		wm_toggle_lanphypc_pch_lpt(sc);
12160 
12161 		if (sc->sc_type >= WM_T_PCH_LPT) {
12162 			if (wm_phy_is_accessible_pchlan(sc) == true)
12163 				break;
12164 
12165 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
12166 			reg &= ~CTRL_EXT_FORCE_SMBUS;
12167 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12168 
12169 			if (wm_phy_is_accessible_pchlan(sc) == true)
12170 				break;
12171 			rv = -1;
12172 		}
12173 		break;
12174 	default:
12175 		break;
12176 	}
12177 
12178 	/* Release semaphore */
12179 	sc->phy.release(sc);
12180 
12181 	if (rv == 0) {
12182 		if (wm_phy_resetisblocked(sc)) {
12183 			printf("XXX reset is blocked(4)\n");
12184 			goto out;
12185 		}
12186 		wm_reset_phy(sc);
12187 		if (wm_phy_resetisblocked(sc))
12188 			printf("XXX reset is blocked(4)\n");
12189 	}
12190 
12191 out:
12192 	/*
12193 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
12194 	 */
12195 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
12196 		delay(10*1000);
12197 		wm_gate_hw_phy_config_ich8lan(sc, false);
12198 	}
12199 }
12200 
12201 static void
12202 wm_init_manageability(struct wm_softc *sc)
12203 {
12204 
12205 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12206 		device_xname(sc->sc_dev), __func__));
12207 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
12208 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
12209 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
12210 
12211 		/* Disable hardware interception of ARP */
12212 		manc &= ~MANC_ARP_EN;
12213 
12214 		/* Enable receiving management packets to the host */
12215 		if (sc->sc_type >= WM_T_82571) {
12216 			manc |= MANC_EN_MNG2HOST;
12217 			manc2h |= MANC2H_PORT_623| MANC2H_PORT_624;
12218 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
12219 		}
12220 
12221 		CSR_WRITE(sc, WMREG_MANC, manc);
12222 	}
12223 }
12224 
12225 static void
12226 wm_release_manageability(struct wm_softc *sc)
12227 {
12228 
12229 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
12230 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
12231 
12232 		manc |= MANC_ARP_EN;
12233 		if (sc->sc_type >= WM_T_82571)
12234 			manc &= ~MANC_EN_MNG2HOST;
12235 
12236 		CSR_WRITE(sc, WMREG_MANC, manc);
12237 	}
12238 }
12239 
12240 static void
12241 wm_get_wakeup(struct wm_softc *sc)
12242 {
12243 
12244 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
12245 	switch (sc->sc_type) {
12246 	case WM_T_82573:
12247 	case WM_T_82583:
12248 		sc->sc_flags |= WM_F_HAS_AMT;
12249 		/* FALLTHROUGH */
12250 	case WM_T_80003:
12251 	case WM_T_82575:
12252 	case WM_T_82576:
12253 	case WM_T_82580:
12254 	case WM_T_I350:
12255 	case WM_T_I354:
12256 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
12257 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
12258 		/* FALLTHROUGH */
12259 	case WM_T_82541:
12260 	case WM_T_82541_2:
12261 	case WM_T_82547:
12262 	case WM_T_82547_2:
12263 	case WM_T_82571:
12264 	case WM_T_82572:
12265 	case WM_T_82574:
12266 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12267 		break;
12268 	case WM_T_ICH8:
12269 	case WM_T_ICH9:
12270 	case WM_T_ICH10:
12271 	case WM_T_PCH:
12272 	case WM_T_PCH2:
12273 	case WM_T_PCH_LPT:
12274 	case WM_T_PCH_SPT:
12275 		sc->sc_flags |= WM_F_HAS_AMT;
12276 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
12277 		break;
12278 	default:
12279 		break;
12280 	}
12281 
12282 	/* 1: HAS_MANAGE */
12283 	if (wm_enable_mng_pass_thru(sc) != 0)
12284 		sc->sc_flags |= WM_F_HAS_MANAGE;
12285 
12286 #ifdef WM_DEBUG
12287 	printf("\n");
12288 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
12289 		printf("HAS_AMT,");
12290 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0)
12291 		printf("ARC_SUBSYS_VALID,");
12292 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) != 0)
12293 		printf("ASF_FIRMWARE_PRES,");
12294 	if ((sc->sc_flags & WM_F_HAS_MANAGE) != 0)
12295 		printf("HAS_MANAGE,");
12296 	printf("\n");
12297 #endif
12298 	/*
12299 	 * Note that the WOL flags is set after the resetting of the eeprom
12300 	 * stuff
12301 	 */
12302 }
12303 
12304 /*
12305  * Unconfigure Ultra Low Power mode.
12306  * Only for I217 and newer (see below).
12307  */
12308 static void
12309 wm_ulp_disable(struct wm_softc *sc)
12310 {
12311 	uint32_t reg;
12312 	int i = 0;
12313 
12314 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12315 		device_xname(sc->sc_dev), __func__));
12316 	/* Exclude old devices */
12317 	if ((sc->sc_type < WM_T_PCH_LPT)
12318 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
12319 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
12320 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
12321 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
12322 		return;
12323 
12324 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
12325 		/* Request ME un-configure ULP mode in the PHY */
12326 		reg = CSR_READ(sc, WMREG_H2ME);
12327 		reg &= ~H2ME_ULP;
12328 		reg |= H2ME_ENFORCE_SETTINGS;
12329 		CSR_WRITE(sc, WMREG_H2ME, reg);
12330 
12331 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
12332 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
12333 			if (i++ == 30) {
12334 				printf("%s timed out\n", __func__);
12335 				return;
12336 			}
12337 			delay(10 * 1000);
12338 		}
12339 		reg = CSR_READ(sc, WMREG_H2ME);
12340 		reg &= ~H2ME_ENFORCE_SETTINGS;
12341 		CSR_WRITE(sc, WMREG_H2ME, reg);
12342 
12343 		return;
12344 	}
12345 
12346 	/* Acquire semaphore */
12347 	sc->phy.acquire(sc);
12348 
12349 	/* Toggle LANPHYPC */
12350 	wm_toggle_lanphypc_pch_lpt(sc);
12351 
12352 	/* Unforce SMBus mode in PHY */
12353 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12354 	if (reg == 0x0000 || reg == 0xffff) {
12355 		uint32_t reg2;
12356 
12357 		printf("%s: Force SMBus first.\n", __func__);
12358 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
12359 		reg2 |= CTRL_EXT_FORCE_SMBUS;
12360 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
12361 		delay(50 * 1000);
12362 
12363 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
12364 	}
12365 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12366 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
12367 
12368 	/* Unforce SMBus mode in MAC */
12369 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
12370 	reg &= ~CTRL_EXT_FORCE_SMBUS;
12371 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12372 
12373 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
12374 	reg |= HV_PM_CTRL_K1_ENA;
12375 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
12376 
12377 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
12378 	reg &= ~(I218_ULP_CONFIG1_IND
12379 	    | I218_ULP_CONFIG1_STICKY_ULP
12380 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
12381 	    | I218_ULP_CONFIG1_WOL_HOST
12382 	    | I218_ULP_CONFIG1_INBAND_EXIT
12383 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
12384 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
12385 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
12386 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12387 	reg |= I218_ULP_CONFIG1_START;
12388 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
12389 
12390 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
12391 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
12392 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
12393 
12394 	/* Release semaphore */
12395 	sc->phy.release(sc);
12396 	wm_gmii_reset(sc);
12397 	delay(50 * 1000);
12398 }
12399 
12400 /* WOL in the newer chipset interfaces (pchlan) */
12401 static void
12402 wm_enable_phy_wakeup(struct wm_softc *sc)
12403 {
12404 #if 0
12405 	uint16_t preg;
12406 
12407 	/* Copy MAC RARs to PHY RARs */
12408 
12409 	/* Copy MAC MTA to PHY MTA */
12410 
12411 	/* Configure PHY Rx Control register */
12412 
12413 	/* Enable PHY wakeup in MAC register */
12414 
12415 	/* Configure and enable PHY wakeup in PHY registers */
12416 
12417 	/* Activate PHY wakeup */
12418 
12419 	/* XXX */
12420 #endif
12421 }
12422 
12423 /* Power down workaround on D3 */
12424 static void
12425 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
12426 {
12427 	uint32_t reg;
12428 	int i;
12429 
12430 	for (i = 0; i < 2; i++) {
12431 		/* Disable link */
12432 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
12433 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12434 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12435 
12436 		/*
12437 		 * Call gig speed drop workaround on Gig disable before
12438 		 * accessing any PHY registers
12439 		 */
12440 		if (sc->sc_type == WM_T_ICH8)
12441 			wm_gig_downshift_workaround_ich8lan(sc);
12442 
12443 		/* Write VR power-down enable */
12444 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12445 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12446 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
12447 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
12448 
12449 		/* Read it back and test */
12450 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
12451 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
12452 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
12453 			break;
12454 
12455 		/* Issue PHY reset and repeat at most one more time */
12456 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
12457 	}
12458 }
12459 
12460 static void
12461 wm_enable_wakeup(struct wm_softc *sc)
12462 {
12463 	uint32_t reg, pmreg;
12464 	pcireg_t pmode;
12465 
12466 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12467 		device_xname(sc->sc_dev), __func__));
12468 
12469 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
12470 		&pmreg, NULL) == 0)
12471 		return;
12472 
12473 	/* Advertise the wakeup capability */
12474 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
12475 	    | CTRL_SWDPIN(3));
12476 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
12477 
12478 	/* ICH workaround */
12479 	switch (sc->sc_type) {
12480 	case WM_T_ICH8:
12481 	case WM_T_ICH9:
12482 	case WM_T_ICH10:
12483 	case WM_T_PCH:
12484 	case WM_T_PCH2:
12485 	case WM_T_PCH_LPT:
12486 	case WM_T_PCH_SPT:
12487 		/* Disable gig during WOL */
12488 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
12489 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
12490 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12491 		if (sc->sc_type == WM_T_PCH)
12492 			wm_gmii_reset(sc);
12493 
12494 		/* Power down workaround */
12495 		if (sc->sc_phytype == WMPHY_82577) {
12496 			struct mii_softc *child;
12497 
12498 			/* Assume that the PHY is copper */
12499 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
12500 			if (child->mii_mpd_rev <= 2)
12501 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
12502 				    (768 << 5) | 25, 0x0444); /* magic num */
12503 		}
12504 		break;
12505 	default:
12506 		break;
12507 	}
12508 
12509 	/* Keep the laser running on fiber adapters */
12510 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
12511 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
12512 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
12513 		reg |= CTRL_EXT_SWDPIN(3);
12514 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12515 	}
12516 
12517 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
12518 #if 0	/* for the multicast packet */
12519 	reg |= WUFC_MC;
12520 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
12521 #endif
12522 
12523 	if (sc->sc_type >= WM_T_PCH)
12524 		wm_enable_phy_wakeup(sc);
12525 	else {
12526 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
12527 		CSR_WRITE(sc, WMREG_WUFC, reg);
12528 	}
12529 
12530 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12531 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12532 		|| (sc->sc_type == WM_T_PCH2))
12533 		    && (sc->sc_phytype == WMPHY_IGP_3))
12534 			wm_igp3_phy_powerdown_workaround_ich8lan(sc);
12535 
12536 	/* Request PME */
12537 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
12538 #if 0
12539 	/* Disable WOL */
12540 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
12541 #else
12542 	/* For WOL */
12543 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
12544 #endif
12545 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
12546 }
12547 
12548 /* LPLU */
12549 
12550 static void
12551 wm_lplu_d0_disable(struct wm_softc *sc)
12552 {
12553 	uint32_t reg;
12554 
12555 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12556 		device_xname(sc->sc_dev), __func__));
12557 
12558 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
12559 	reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
12560 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12561 }
12562 
12563 static void
12564 wm_lplu_d0_disable_pch(struct wm_softc *sc)
12565 {
12566 	uint32_t reg;
12567 
12568 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12569 		device_xname(sc->sc_dev), __func__));
12570 
12571 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
12572 	reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
12573 	reg |= HV_OEM_BITS_ANEGNOW;
12574 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
12575 }
12576 
12577 /* EEE */
12578 
12579 static void
12580 wm_set_eee_i350(struct wm_softc *sc)
12581 {
12582 	uint32_t ipcnfg, eeer;
12583 
12584 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
12585 	eeer = CSR_READ(sc, WMREG_EEER);
12586 
12587 	if ((sc->sc_flags & WM_F_EEE) != 0) {
12588 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12589 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
12590 		    | EEER_LPI_FC);
12591 	} else {
12592 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
12593 		ipcnfg &= ~IPCNFG_10BASE_TE;
12594 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
12595 		    | EEER_LPI_FC);
12596 	}
12597 
12598 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
12599 	CSR_WRITE(sc, WMREG_EEER, eeer);
12600 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
12601 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
12602 }
12603 
12604 /*
12605  * Workarounds (mainly PHY related).
12606  * Basically, PHY's workarounds are in the PHY drivers.
12607  */
12608 
12609 /* Work-around for 82566 Kumeran PCS lock loss */
12610 static void
12611 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
12612 {
12613 #if 0
12614 	int miistatus, active, i;
12615 	int reg;
12616 
12617 	miistatus = sc->sc_mii.mii_media_status;
12618 
12619 	/* If the link is not up, do nothing */
12620 	if ((miistatus & IFM_ACTIVE) == 0)
12621 		return;
12622 
12623 	active = sc->sc_mii.mii_media_active;
12624 
12625 	/* Nothing to do if the link is other than 1Gbps */
12626 	if (IFM_SUBTYPE(active) != IFM_1000_T)
12627 		return;
12628 
12629 	for (i = 0; i < 10; i++) {
12630 		/* read twice */
12631 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12632 		reg = wm_gmii_i80003_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
12633 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
12634 			goto out;	/* GOOD! */
12635 
12636 		/* Reset the PHY */
12637 		wm_gmii_reset(sc);
12638 		delay(5*1000);
12639 	}
12640 
12641 	/* Disable GigE link negotiation */
12642 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
12643 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
12644 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
12645 
12646 	/*
12647 	 * Call gig speed drop workaround on Gig disable before accessing
12648 	 * any PHY registers.
12649 	 */
12650 	wm_gig_downshift_workaround_ich8lan(sc);
12651 
12652 out:
12653 	return;
12654 #endif
12655 }
12656 
12657 /* WOL from S5 stops working */
12658 static void
12659 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
12660 {
12661 	uint16_t kmrn_reg;
12662 
12663 	/* Only for igp3 */
12664 	if (sc->sc_phytype == WMPHY_IGP_3) {
12665 		kmrn_reg = wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG);
12666 		kmrn_reg |= KUMCTRLSTA_DIAG_NELPBK;
12667 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12668 		kmrn_reg &= ~KUMCTRLSTA_DIAG_NELPBK;
12669 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmrn_reg);
12670 	}
12671 }
12672 
12673 /*
12674  * Workaround for pch's PHYs
12675  * XXX should be moved to new PHY driver?
12676  */
12677 static void
12678 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
12679 {
12680 
12681 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12682 		device_xname(sc->sc_dev), __func__));
12683 	KASSERT(sc->sc_type == WM_T_PCH);
12684 
12685 	if (sc->sc_phytype == WMPHY_82577)
12686 		wm_set_mdio_slow_mode_hv(sc);
12687 
12688 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
12689 
12690 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
12691 
12692 	/* 82578 */
12693 	if (sc->sc_phytype == WMPHY_82578) {
12694 		struct mii_softc *child;
12695 
12696 		/*
12697 		 * Return registers to default by doing a soft reset then
12698 		 * writing 0x3140 to the control register
12699 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
12700 		 */
12701 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
12702 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
12703 			PHY_RESET(child);
12704 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
12705 			    0x3140);
12706 		}
12707 	}
12708 
12709 	/* Select page 0 */
12710 	sc->phy.acquire(sc);
12711 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
12712 	sc->phy.release(sc);
12713 
12714 	/*
12715 	 * Configure the K1 Si workaround during phy reset assuming there is
12716 	 * link so that it disables K1 if link is in 1Gbps.
12717 	 */
12718 	wm_k1_gig_workaround_hv(sc, 1);
12719 }
12720 
12721 static void
12722 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
12723 {
12724 
12725 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12726 		device_xname(sc->sc_dev), __func__));
12727 	KASSERT(sc->sc_type == WM_T_PCH2);
12728 
12729 	wm_set_mdio_slow_mode_hv(sc);
12730 }
12731 
12732 static int
12733 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
12734 {
12735 	int k1_enable = sc->sc_nvm_k1_enabled;
12736 
12737 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12738 		device_xname(sc->sc_dev), __func__));
12739 
12740 	if (sc->phy.acquire(sc) != 0)
12741 		return -1;
12742 
12743 	if (link) {
12744 		k1_enable = 0;
12745 
12746 		/* Link stall fix for link up */
12747 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x0100);
12748 	} else {
12749 		/* Link stall fix for link down */
12750 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG, 0x4100);
12751 	}
12752 
12753 	wm_configure_k1_ich8lan(sc, k1_enable);
12754 	sc->phy.release(sc);
12755 
12756 	return 0;
12757 }
12758 
12759 static void
12760 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
12761 {
12762 	uint32_t reg;
12763 
12764 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
12765 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
12766 	    reg | HV_KMRN_MDIO_SLOW);
12767 }
12768 
12769 static void
12770 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
12771 {
12772 	uint32_t ctrl, ctrl_ext, tmp;
12773 	uint16_t kmrn_reg;
12774 
12775 	kmrn_reg = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG);
12776 
12777 	if (k1_enable)
12778 		kmrn_reg |= KUMCTRLSTA_K1_ENABLE;
12779 	else
12780 		kmrn_reg &= ~KUMCTRLSTA_K1_ENABLE;
12781 
12782 	wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmrn_reg);
12783 
12784 	delay(20);
12785 
12786 	ctrl = CSR_READ(sc, WMREG_CTRL);
12787 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
12788 
12789 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
12790 	tmp |= CTRL_FRCSPD;
12791 
12792 	CSR_WRITE(sc, WMREG_CTRL, tmp);
12793 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
12794 	CSR_WRITE_FLUSH(sc);
12795 	delay(20);
12796 
12797 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
12798 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
12799 	CSR_WRITE_FLUSH(sc);
12800 	delay(20);
12801 }
12802 
12803 /* special case - for 82575 - need to do manual init ... */
12804 static void
12805 wm_reset_init_script_82575(struct wm_softc *sc)
12806 {
12807 	/*
12808 	 * remark: this is untested code - we have no board without EEPROM
12809 	 *  same setup as mentioned int the FreeBSD driver for the i82575
12810 	 */
12811 
12812 	/* SerDes configuration via SERDESCTRL */
12813 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
12814 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
12815 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
12816 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
12817 
12818 	/* CCM configuration via CCMCTL register */
12819 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
12820 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
12821 
12822 	/* PCIe lanes configuration */
12823 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
12824 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
12825 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
12826 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
12827 
12828 	/* PCIe PLL Configuration */
12829 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
12830 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
12831 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
12832 }
12833 
12834 static void
12835 wm_reset_mdicnfg_82580(struct wm_softc *sc)
12836 {
12837 	uint32_t reg;
12838 	uint16_t nvmword;
12839 	int rv;
12840 
12841 	if ((sc->sc_flags & WM_F_SGMII) == 0)
12842 		return;
12843 
12844 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
12845 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
12846 	if (rv != 0) {
12847 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
12848 		    __func__);
12849 		return;
12850 	}
12851 
12852 	reg = CSR_READ(sc, WMREG_MDICNFG);
12853 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
12854 		reg |= MDICNFG_DEST;
12855 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
12856 		reg |= MDICNFG_COM_MDIO;
12857 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
12858 }
12859 
12860 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
12861 
12862 static bool
12863 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
12864 {
12865 	int i;
12866 	uint32_t reg;
12867 	uint16_t id1, id2;
12868 
12869 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12870 		device_xname(sc->sc_dev), __func__));
12871 	id1 = id2 = 0xffff;
12872 	for (i = 0; i < 2; i++) {
12873 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
12874 		if (MII_INVALIDID(id1))
12875 			continue;
12876 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
12877 		if (MII_INVALIDID(id2))
12878 			continue;
12879 		break;
12880 	}
12881 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2)) {
12882 		goto out;
12883 	}
12884 
12885 	if (sc->sc_type < WM_T_PCH_LPT) {
12886 		sc->phy.release(sc);
12887 		wm_set_mdio_slow_mode_hv(sc);
12888 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
12889 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
12890 		sc->phy.acquire(sc);
12891 	}
12892 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
12893 		printf("XXX return with false\n");
12894 		return false;
12895 	}
12896 out:
12897 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)) {
12898 		/* Only unforce SMBus if ME is not active */
12899 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
12900 			/* Unforce SMBus mode in PHY */
12901 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
12902 			    CV_SMB_CTRL);
12903 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
12904 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
12905 			    CV_SMB_CTRL, reg);
12906 
12907 			/* Unforce SMBus mode in MAC */
12908 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
12909 			reg &= ~CTRL_EXT_FORCE_SMBUS;
12910 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
12911 		}
12912 	}
12913 	return true;
12914 }
12915 
12916 static void
12917 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
12918 {
12919 	uint32_t reg;
12920 	int i;
12921 
12922 	/* Set PHY Config Counter to 50msec */
12923 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
12924 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
12925 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
12926 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
12927 
12928 	/* Toggle LANPHYPC */
12929 	reg = CSR_READ(sc, WMREG_CTRL);
12930 	reg |= CTRL_LANPHYPC_OVERRIDE;
12931 	reg &= ~CTRL_LANPHYPC_VALUE;
12932 	CSR_WRITE(sc, WMREG_CTRL, reg);
12933 	CSR_WRITE_FLUSH(sc);
12934 	delay(1000);
12935 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
12936 	CSR_WRITE(sc, WMREG_CTRL, reg);
12937 	CSR_WRITE_FLUSH(sc);
12938 
12939 	if (sc->sc_type < WM_T_PCH_LPT)
12940 		delay(50 * 1000);
12941 	else {
12942 		i = 20;
12943 
12944 		do {
12945 			delay(5 * 1000);
12946 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
12947 		    && i--);
12948 
12949 		delay(30 * 1000);
12950 	}
12951 }
12952 
12953 static int
12954 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
12955 {
12956 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
12957 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
12958 	uint32_t rxa;
12959 	uint16_t scale = 0, lat_enc = 0;
12960 	int64_t lat_ns, value;
12961 
12962 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
12963 		device_xname(sc->sc_dev), __func__));
12964 
12965 	if (link) {
12966 		pcireg_t preg;
12967 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
12968 
12969 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
12970 
12971 		/*
12972 		 * Determine the maximum latency tolerated by the device.
12973 		 *
12974 		 * Per the PCIe spec, the tolerated latencies are encoded as
12975 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
12976 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
12977 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
12978 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
12979 		 */
12980 		lat_ns = ((int64_t)rxa * 1024 -
12981 		    (2 * (int64_t)sc->sc_ethercom.ec_if.if_mtu)) * 8 * 1000;
12982 		if (lat_ns < 0)
12983 			lat_ns = 0;
12984 		else {
12985 			uint32_t status;
12986 			uint16_t speed;
12987 
12988 			status = CSR_READ(sc, WMREG_STATUS);
12989 			switch (__SHIFTOUT(status, STATUS_SPEED)) {
12990 			case STATUS_SPEED_10:
12991 				speed = 10;
12992 				break;
12993 			case STATUS_SPEED_100:
12994 				speed = 100;
12995 				break;
12996 			case STATUS_SPEED_1000:
12997 				speed = 1000;
12998 				break;
12999 			default:
13000 				printf("%s: Unknown speed (status = %08x)\n",
13001 				    device_xname(sc->sc_dev), status);
13002 				return -1;
13003 			}
13004 			lat_ns /= speed;
13005 		}
13006 		value = lat_ns;
13007 
13008 		while (value > LTRV_VALUE) {
13009 			scale ++;
13010 			value = howmany(value, __BIT(5));
13011 		}
13012 		if (scale > LTRV_SCALE_MAX) {
13013 			printf("%s: Invalid LTR latency scale %d\n",
13014 			    device_xname(sc->sc_dev), scale);
13015 			return -1;
13016 		}
13017 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
13018 
13019 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13020 		    WM_PCI_LTR_CAP_LPT);
13021 		max_snoop = preg & 0xffff;
13022 		max_nosnoop = preg >> 16;
13023 
13024 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
13025 
13026 		if (lat_enc > max_ltr_enc) {
13027 			lat_enc = max_ltr_enc;
13028 		}
13029 	}
13030 	/* Snoop and No-Snoop latencies the same */
13031 	reg |= lat_enc | __SHIFTIN(lat_enc, LTRV_NONSNOOP);
13032 	CSR_WRITE(sc, WMREG_LTRV, reg);
13033 
13034 	return 0;
13035 }
13036 
13037 /*
13038  * I210 Errata 25 and I211 Errata 10
13039  * Slow System Clock.
13040  */
13041 static void
13042 wm_pll_workaround_i210(struct wm_softc *sc)
13043 {
13044 	uint32_t mdicnfg, wuc;
13045 	uint32_t reg;
13046 	pcireg_t pcireg;
13047 	uint32_t pmreg;
13048 	uint16_t nvmword, tmp_nvmword;
13049 	int phyval;
13050 	bool wa_done = false;
13051 	int i;
13052 
13053 	/* Save WUC and MDICNFG registers */
13054 	wuc = CSR_READ(sc, WMREG_WUC);
13055 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
13056 
13057 	reg = mdicnfg & ~MDICNFG_DEST;
13058 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
13059 
13060 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
13061 		nvmword = INVM_DEFAULT_AL;
13062 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
13063 
13064 	/* Get Power Management cap offset */
13065 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13066 		&pmreg, NULL) == 0)
13067 		return;
13068 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
13069 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
13070 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
13071 
13072 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF) {
13073 			break; /* OK */
13074 		}
13075 
13076 		wa_done = true;
13077 		/* Directly reset the internal PHY */
13078 		reg = CSR_READ(sc, WMREG_CTRL);
13079 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
13080 
13081 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13082 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
13083 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13084 
13085 		CSR_WRITE(sc, WMREG_WUC, 0);
13086 		reg = (INVM_AUTOLOAD << 4) | (tmp_nvmword << 16);
13087 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13088 
13089 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
13090 		    pmreg + PCI_PMCSR);
13091 		pcireg |= PCI_PMCSR_STATE_D3;
13092 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13093 		    pmreg + PCI_PMCSR, pcireg);
13094 		delay(1000);
13095 		pcireg &= ~PCI_PMCSR_STATE_D3;
13096 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
13097 		    pmreg + PCI_PMCSR, pcireg);
13098 
13099 		reg = (INVM_AUTOLOAD << 4) | (nvmword << 16);
13100 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
13101 
13102 		/* Restore WUC register */
13103 		CSR_WRITE(sc, WMREG_WUC, wuc);
13104 	}
13105 
13106 	/* Restore MDICNFG setting */
13107 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
13108 	if (wa_done)
13109 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
13110 }
13111