/*	$NetBSD: if_wm.c,v 1.583 2018/06/26 06:48:01 msaitoh Exp $	*/

/*
 * Copyright (c) 2001, 2002, 2003, 2004 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*******************************************************************************

  Copyright (c) 2001-2005, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
/*
 * Device driver for the Intel i8254x family of Gigabit Ethernet chips.
 *
 * TODO (in order of importance):
 *
 *	- Check XXX'ed comments
 *	- TX Multi queue improvement (refine queue selection logic)
 *	- Split header buffer for newer descriptors
 *	- EEE (Energy Efficient Ethernet)
 *	- Virtual Function
 *	- Set LED correctly (based on contents in EEPROM)
 *	- Rework how parameters are loaded from the EEPROM.
 *	- Image Unique ID
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_wm.c,v 1.583 2018/06/26 06:48:01 msaitoh Exp $");

#ifdef _KERNEL_OPT
#include "opt_net_mpsafe.h"
#include "opt_if_wm.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/interrupt.h>
#include <sys/cpu.h>
#include <sys/pcq.h>

#include <sys/rndsource.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <net/rss_config.h>

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/ip6.h>		/* XXX for struct ip6_hdr */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/miidevs.h>
#include <dev/mii/mii_bitbang.h>
#include <dev/mii/ikphyreg.h>
#include <dev/mii/igphyreg.h>
#include <dev/mii/igphyvar.h>
#include <dev/mii/inbmphyreg.h>
#include <dev/mii/ihphyreg.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_wmreg.h>
#include <dev/pci/if_wmvar.h>

#ifdef WM_DEBUG
#define	WM_DEBUG_LINK		__BIT(0)
#define	WM_DEBUG_TX		__BIT(1)
#define	WM_DEBUG_RX		__BIT(2)
#define	WM_DEBUG_GMII		__BIT(3)
#define	WM_DEBUG_MANAGE		__BIT(4)
#define	WM_DEBUG_NVM		__BIT(5)
#define	WM_DEBUG_INIT		__BIT(6)
#define	WM_DEBUG_LOCK		__BIT(7)
int	wm_debug = WM_DEBUG_TX | WM_DEBUG_RX | WM_DEBUG_LINK | WM_DEBUG_GMII
    | WM_DEBUG_MANAGE | WM_DEBUG_NVM | WM_DEBUG_INIT | WM_DEBUG_LOCK;

#define	DPRINTF(x, y)	if (wm_debug & (x)) printf y
#else
#define	DPRINTF(x, y)	/* nothing */
#endif /* WM_DEBUG */
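
/*
 * Illustrative sketch, not part of the driver: with WM_DEBUG defined,
 * DPRINTF() prints only when the selected class bit is set in wm_debug,
 * e.g.:
 *
 *	DPRINTF(WM_DEBUG_LINK, ("%s: link state changed\n",
 *		device_xname(sc->sc_dev)));
 *
 * The second argument must carry its own parentheses because the macro
 * pastes it directly after printf.
 */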

#ifdef NET_MPSAFE
#define WM_MPSAFE	1
#define CALLOUT_FLAGS	CALLOUT_MPSAFE
#else
#define CALLOUT_FLAGS	0
#endif

/*
 * Maximum number of interrupts used by this device driver.
 */
#define WM_MAX_NQUEUEINTR	16
#define WM_MAX_NINTR		(WM_MAX_NQUEUEINTR + 1)

#ifndef WM_DISABLE_MSI
#define	WM_DISABLE_MSI 0
#endif
#ifndef WM_DISABLE_MSIX
#define	WM_DISABLE_MSIX 0
#endif

int wm_disable_msi = WM_DISABLE_MSI;
int wm_disable_msix = WM_DISABLE_MSIX;

#ifndef WM_WATCHDOG_TIMEOUT
#define WM_WATCHDOG_TIMEOUT 5
#endif
static int wm_watchdog_timeout = WM_WATCHDOG_TIMEOUT;

/*
 * Transmit descriptor list size.  Due to errata, we can only have
 * 256 hardware descriptors in the ring on < 82544, but we use 4096
 * on >= 82544. We tell the upper layers that they can queue a lot
 * of packets, and we go ahead and manage up to 64 (16 for the i82547)
 * of them at a time.
 *
 * We allow up to 256 (!) DMA segments per packet.  Pathological packet
 * chains containing many small mbufs have been observed in zero-copy
 * situations with jumbo frames.
 */
#define	WM_NTXSEGS		256
#define	WM_IFQUEUELEN		256
#define	WM_TXQUEUELEN_MAX	64
#define	WM_TXQUEUELEN_MAX_82547	16
#define	WM_TXQUEUELEN(txq)	((txq)->txq_num)
#define	WM_TXQUEUELEN_MASK(txq)	(WM_TXQUEUELEN(txq) - 1)
#define	WM_TXQUEUE_GC(txq)	(WM_TXQUEUELEN(txq) / 8)
#define	WM_NTXDESC_82542	256
#define	WM_NTXDESC_82544	4096
#define	WM_NTXDESC(txq)		((txq)->txq_ndesc)
#define	WM_NTXDESC_MASK(txq)	(WM_NTXDESC(txq) - 1)
#define	WM_TXDESCS_SIZE(txq)	(WM_NTXDESC(txq) * (txq)->txq_descsize)
#define	WM_NEXTTX(txq, x)	(((x) + 1) & WM_NTXDESC_MASK(txq))
#define	WM_NEXTTXS(txq, x)	(((x) + 1) & WM_TXQUEUELEN_MASK(txq))
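
/*
 * Illustrative sketch, not part of the driver: since WM_NTXDESC(txq) is
 * a power of two, WM_NEXTTX() wraps the ring index with a cheap AND of
 * the mask instead of a modulo.  Assuming txq->txq_ndesc == 4096:
 *
 *	int idx = 4095;
 *	idx = WM_NEXTTX(txq, idx);	-> (4095 + 1) & 4095 == 0
 *
 * WM_NEXTTXS() applies the same trick to the Tx job ring using
 * WM_TXQUEUELEN_MASK().
 */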

#define	WM_MAXTXDMA		 (2 * round_page(IP_MAXPACKET)) /* for TSO */

#define	WM_TXINTERQSIZE		256

#ifndef WM_TX_PROCESS_LIMIT_DEFAULT
#define	WM_TX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_TX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_TX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer for normal
 * sized packets.  Jumbo packets consume 5 Rx buffers for a full-sized
 * packet.  We allocate 256 receive descriptors, each with a 2k
 * buffer (MCLBYTES), which gives us room for 50 jumbo packets.
 */
#define	WM_NRXDESC		256
#define	WM_NRXDESC_MASK		(WM_NRXDESC - 1)
#define	WM_NEXTRX(x)		(((x) + 1) & WM_NRXDESC_MASK)
#define	WM_PREVRX(x)		(((x) - 1) & WM_NRXDESC_MASK)

#ifndef WM_RX_PROCESS_LIMIT_DEFAULT
#define	WM_RX_PROCESS_LIMIT_DEFAULT		100U
#endif
#ifndef WM_RX_INTR_PROCESS_LIMIT_DEFAULT
#define	WM_RX_INTR_PROCESS_LIMIT_DEFAULT	0U
#endif

typedef union txdescs {
	wiseman_txdesc_t sctxu_txdescs[WM_NTXDESC_82544];
	nq_txdesc_t	 sctxu_nq_txdescs[WM_NTXDESC_82544];
} txdescs_t;

typedef union rxdescs {
	wiseman_rxdesc_t sctxu_rxdescs[WM_NRXDESC];
	ext_rxdesc_t	  sctxu_ext_rxdescs[WM_NRXDESC]; /* 82574 only */
	nq_rxdesc_t	 sctxu_nq_rxdescs[WM_NRXDESC]; /* 82575 and newer */
} rxdescs_t;

#define	WM_CDTXOFF(txq, x)	((txq)->txq_descsize * (x))
#define	WM_CDRXOFF(rxq, x)	((rxq)->rxq_descsize * (x))

/*
 * Software state for transmit jobs.
 */
struct wm_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers. Each descriptor gets a 2k (MCLBYTES)
 * buffer and a DMA map. For packets which fill more than one buffer, we chain
 * them together.
 */
struct wm_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

#define WM_LINKUP_TIMEOUT	50

static uint16_t swfwphysem[] = {
	SWFW_PHY0_SM,
	SWFW_PHY1_SM,
	SWFW_PHY2_SM,
	SWFW_PHY3_SM
};

static const uint32_t wm_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140
};

struct wm_softc;

#ifdef WM_EVENT_COUNTERS
#define WM_Q_EVCNT_DEFINE(qname, evname)				\
	char qname##_##evname##_evcnt_name[sizeof("qname##XX##evname")]; \
	struct evcnt qname##_ev_##evname;

#define WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, evtype)	\
	do {								\
		snprintf((q)->qname##_##evname##_evcnt_name,		\
		    sizeof((q)->qname##_##evname##_evcnt_name),		\
		    "%s%02d%s", #qname, (qnum), #evname);		\
		evcnt_attach_dynamic(&(q)->qname##_ev_##evname,		\
		    (evtype), NULL, (xname),				\
		    (q)->qname##_##evname##_evcnt_name);		\
	} while (0)

#define WM_Q_MISC_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_MISC)

#define WM_Q_INTR_EVCNT_ATTACH(qname, evname, q, qnum, xname)		\
	WM_Q_EVCNT_ATTACH(qname, evname, q, qnum, xname, EVCNT_TYPE_INTR)

#define WM_Q_EVCNT_DETACH(qname, evname, q, qnum)	\
	evcnt_detach(&(q)->qname##_ev_##evname);
#endif /* WM_EVENT_COUNTERS */
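
/*
 * Illustrative sketch, not part of the driver: for a counter declared
 * with WM_Q_EVCNT_DEFINE(txq, txdw), token pasting produces the members
 * txq_txdw_evcnt_name[] and txq_ev_txdw, so that
 *
 *	WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, 0, xname);
 *
 * formats the counter name "txq00txdw" into the name buffer and
 * attaches the evcnt under it.
 */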

struct wm_txqueue {
	kmutex_t *txq_lock;		/* lock for tx operations */

	struct wm_softc *txq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the transmit descriptors. */
	int txq_num;			/* must be a power of two */
	struct wm_txsoft txq_soft[WM_TXQUEUELEN_MAX];

	/* TX control data structures. */
	int txq_ndesc;			/* must be a power of two */
	size_t txq_descsize;		/* size of a tx descriptor */
	txdescs_t *txq_descs_u;
	bus_dmamap_t txq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t txq_desc_seg;	/* control data segment */
	int txq_desc_rseg;		/* real number of control segments */
#define	txq_desc_dma	txq_desc_dmamap->dm_segs[0].ds_addr
#define	txq_descs	txq_descs_u->sctxu_txdescs
#define	txq_nq_descs	txq_descs_u->sctxu_nq_txdescs

	bus_addr_t txq_tdt_reg;		/* offset of TDT register */

	int txq_free;			/* number of free Tx descriptors */
	int txq_next;			/* next ready Tx descriptor */

	int txq_sfree;			/* number of free Tx jobs */
	int txq_snext;			/* next free Tx job */
	int txq_sdirty;			/* dirty Tx jobs */

	/* These 4 variables are used only on the 82547. */
	int txq_fifo_size;		/* Tx FIFO size */
	int txq_fifo_head;		/* current head of FIFO */
	uint32_t txq_fifo_addr;		/* internal address of start of FIFO */
	int txq_fifo_stall;		/* Tx FIFO is stalled */

	/*
	 * When ncpu > number of Tx queues, a Tx queue is shared by multiple
	 * CPUs. This queue mediates between them without blocking.
	 */
	pcq_t *txq_interq;

	/*
	 * NEWQUEUE devices must use txq->txq_flags, not ifp->if_flags,
	 * to manage the Tx H/W queue's busy flag.
	 */
	int txq_flags;			/* flags for H/W queue, see below */
#define	WM_TXQ_NO_SPACE	0x1

	bool txq_stopping;

	bool txq_sending;
	time_t txq_lastsent;

	uint32_t txq_packets;		/* for AIM */
	uint32_t txq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(txq, txsstall)	/* Tx stalled due to no txs */
	WM_Q_EVCNT_DEFINE(txq, txdstall)	/* Tx stalled due to no txd */
	WM_Q_EVCNT_DEFINE(txq, txfifo_stall)	/* Tx FIFO stalls (82547) */
	WM_Q_EVCNT_DEFINE(txq, txdw)		/* Tx descriptor interrupts */
	WM_Q_EVCNT_DEFINE(txq, txqe)		/* Tx queue empty interrupts */
						/* XXX not used? */

	WM_Q_EVCNT_DEFINE(txq, txipsum)		/* IP checksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum)		/* TCP/UDP cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtusum6)	/* TCP/UDP v6 cksums comp. out-bound */
	WM_Q_EVCNT_DEFINE(txq, txtso)		/* TCP seg offload out-bound (IPv4) */
	WM_Q_EVCNT_DEFINE(txq, txtso6)		/* TCP seg offload out-bound (IPv6) */
	WM_Q_EVCNT_DEFINE(txq, txtsopain)	/* painful header manip. for TSO */

	WM_Q_EVCNT_DEFINE(txq, txdrop)		/* Tx packets dropped (too many segs) */

	WM_Q_EVCNT_DEFINE(txq, tu)		/* Tx underrun */

	char txq_txseg_evcnt_names[WM_NTXSEGS][sizeof("txqXXtxsegXXX")];
	struct evcnt txq_ev_txseg[WM_NTXSEGS]; /* Tx packets w/ N segments */
#endif /* WM_EVENT_COUNTERS */
};

struct wm_rxqueue {
	kmutex_t *rxq_lock;		/* lock for rx operations */

	struct wm_softc *rxq_sc;	/* shortcut (skip struct wm_queue) */

	/* Software state for the receive descriptors. */
	struct wm_rxsoft rxq_soft[WM_NRXDESC];

	/* RX control data structures. */
	int rxq_ndesc;			/* must be a power of two */
	size_t rxq_descsize;		/* size of an rx descriptor */
	rxdescs_t *rxq_descs_u;
	bus_dmamap_t rxq_desc_dmamap;	/* control data DMA map */
	bus_dma_segment_t rxq_desc_seg;	/* control data segment */
	int rxq_desc_rseg;		/* real number of control segments */
#define	rxq_desc_dma	rxq_desc_dmamap->dm_segs[0].ds_addr
#define	rxq_descs	rxq_descs_u->sctxu_rxdescs
#define	rxq_ext_descs	rxq_descs_u->sctxu_ext_rxdescs
#define	rxq_nq_descs	rxq_descs_u->sctxu_nq_rxdescs

	bus_addr_t rxq_rdt_reg;		/* offset of RDT register */

	int rxq_ptr;			/* next ready Rx desc/queue ent */
	int rxq_discard;
	int rxq_len;
	struct mbuf *rxq_head;
	struct mbuf *rxq_tail;
	struct mbuf **rxq_tailp;

	bool rxq_stopping;

	uint32_t rxq_packets;		/* for AIM */
	uint32_t rxq_bytes;		/* for AIM */
#ifdef WM_EVENT_COUNTERS
	WM_Q_EVCNT_DEFINE(rxq, rxintr);		/* Rx interrupts */
	WM_Q_EVCNT_DEFINE(rxq, rxdefer);	/* Rx deferred processing */

	WM_Q_EVCNT_DEFINE(rxq, rxipsum);	/* IP checksums checked in-bound */
	WM_Q_EVCNT_DEFINE(rxq, rxtusum);	/* TCP/UDP cksums checked in-bound */
#endif
};

struct wm_queue {
	int wmq_id;			/* index of TX/RX queues */
	int wmq_intr_idx;		/* index of MSI-X tables */

	uint32_t wmq_itr;		/* interrupt interval per queue. */
	bool wmq_set_itr;

	struct wm_txqueue wmq_txq;
	struct wm_rxqueue wmq_rxq;

	void *wmq_si;
};

struct wm_phyop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int reset_delay_us;
};

struct wm_nvmop {
	int (*acquire)(struct wm_softc *);
	void (*release)(struct wm_softc *);
	int (*read)(struct wm_softc *, int, int, uint16_t *);
};
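
/*
 * Illustrative sketch, not part of the driver: chip-specific semaphore
 * and NVM access routines are dispatched through these ops vectors, so
 * common code can stay generic:
 *
 *	if (sc->nvm.acquire(sc) == 0) {
 *		rv = sc->nvm.read(sc, offset, 1, &word);
 *		sc->nvm.release(sc);
 *	}
 */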

/*
 * Software state per device.
 */
struct wm_softc {
	device_t sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_ss;		/* bus space size */
	bus_space_tag_t sc_iot;		/* I/O space tag */
	bus_space_handle_t sc_ioh;	/* I/O space handle */
	bus_size_t sc_ios;		/* I/O space size */
	bus_space_tag_t sc_flasht;	/* flash registers space tag */
	bus_space_handle_t sc_flashh;	/* flash registers space handle */
	bus_size_t sc_flashs;		/* flash registers space size */
	off_t sc_flashreg_offset;	/*
					 * offset to flash registers from
					 * start of BAR
					 */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */

	struct ethercom sc_ethercom;	/* ethernet common data */
	struct mii_data sc_mii;		/* MII/media information */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pcitag;
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcixe_capoff;		/* PCI[Xe] capability reg offset */

	uint16_t sc_pcidevid;		/* PCI device ID */
	wm_chip_type sc_type;		/* MAC type */
	int sc_rev;			/* MAC revision */
	wm_phy_type sc_phytype;		/* PHY type */
	uint32_t sc_mediatype;		/* Media type (Copper, Fiber, SERDES) */
#define	WM_MEDIATYPE_UNKNOWN		0x00
#define	WM_MEDIATYPE_FIBER		0x01
#define	WM_MEDIATYPE_COPPER		0x02
#define	WM_MEDIATYPE_SERDES		0x03 /* Internal SERDES */
	int sc_funcid;			/* unit number of the chip (0 to 3) */
	int sc_flags;			/* flags; see below */
	int sc_if_flags;		/* last if_flags */
	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_align_tweak;

	void *sc_ihs[WM_MAX_NINTR];	/*
					 * interrupt cookie.
					 * - legacy and msi use sc_ihs[0] only
					 * - msix use sc_ihs[0] to sc_ihs[nintrs-1]
					 */
	pci_intr_handle_t *sc_intrs;	/*
					 * legacy and msi use sc_intrs[0] only
					 * msix use sc_intrs[0] to sc_intrs[nintrs-1]
					 */
	int sc_nintrs;			/* number of interrupts */

	int sc_link_intr_idx;		/* index of MSI-X tables */

	callout_t sc_tick_ch;		/* tick callout */
	bool sc_core_stopping;

	int sc_nvm_ver_major;
	int sc_nvm_ver_minor;
	int sc_nvm_ver_build;
	int sc_nvm_addrbits;		/* NVM address bits */
	unsigned int sc_nvm_wordsize;	/* NVM word size */
	int sc_ich8_flash_base;
	int sc_ich8_flash_bank_size;
	int sc_nvm_k1_enabled;

	int sc_nqueues;
	struct wm_queue *sc_queue;
	u_int sc_tx_process_limit;	/* Tx processing repeat limit in softint */
	u_int sc_tx_intr_process_limit;	/* Tx processing repeat limit in H/W intr */
	u_int sc_rx_process_limit;	/* Rx processing repeat limit in softint */
	u_int sc_rx_intr_process_limit;	/* Rx processing repeat limit in H/W intr */

	int sc_affinity_offset;

#ifdef WM_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	/* WM_T_82542_2_1 only */
	struct evcnt sc_ev_tx_xoff;	/* Tx PAUSE(!0) frames */
	struct evcnt sc_ev_tx_xon;	/* Tx PAUSE(0) frames */
	struct evcnt sc_ev_rx_xoff;	/* Rx PAUSE(!0) frames */
	struct evcnt sc_ev_rx_xon;	/* Rx PAUSE(0) frames */
	struct evcnt sc_ev_rx_macctl;	/* Rx Unsupported */
#endif /* WM_EVENT_COUNTERS */

	/* This variable is used only on the 82547. */
	callout_t sc_txfifo_ch;		/* Tx FIFO stall work-around timer */

	uint32_t sc_ctrl;		/* prototype CTRL register */
#if 0
	uint32_t sc_ctrl_ext;		/* prototype CTRL_EXT register */
#endif
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_itr_init;		/* prototype intr throttling reg */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */
	uint32_t sc_txcw;		/* prototype TXCW register */
	uint32_t sc_tipg;		/* prototype TIPG register */
	uint32_t sc_fcrtl;		/* prototype FCRTL register */
	uint32_t sc_pba;		/* prototype PBA register */

	int sc_tbi_linkup;		/* TBI link status */
	int sc_tbi_serdes_anegticks;	/* autonegotiation ticks */
	int sc_tbi_serdes_ticks;	/* tbi ticks */

	int sc_mchash_type;		/* multicast filter offset */

	krndsource_t rnd_source;	/* random source */

	struct if_percpuq *sc_ipq;	/* softint-based input queues */

	kmutex_t *sc_core_lock;		/* lock for softc operations */
	kmutex_t *sc_ich_phymtx;	/*
					 * 82574/82583/ICH/PCH specific PHY
					 * mutex. For 82574/82583, the mutex
					 * is used for both PHY and NVM.
					 */
	kmutex_t *sc_ich_nvmmtx;	/* ICH/PCH specific NVM mutex */

	struct wm_phyop phy;
	struct wm_nvmop nvm;
};

#define WM_CORE_LOCK(_sc)	if ((_sc)->sc_core_lock) mutex_enter((_sc)->sc_core_lock)
#define WM_CORE_UNLOCK(_sc)	if ((_sc)->sc_core_lock) mutex_exit((_sc)->sc_core_lock)
#define WM_CORE_LOCKED(_sc)	(!(_sc)->sc_core_lock || mutex_owned((_sc)->sc_core_lock))

#define	WM_RXCHAIN_RESET(rxq)						\
do {									\
	(rxq)->rxq_tailp = &(rxq)->rxq_head;				\
	*(rxq)->rxq_tailp = NULL;					\
	(rxq)->rxq_len = 0;						\
} while (/*CONSTCOND*/0)

#define	WM_RXCHAIN_LINK(rxq, m)						\
do {									\
	*(rxq)->rxq_tailp = (rxq)->rxq_tail = (m);			\
	(rxq)->rxq_tailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)
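
/*
 * Illustrative sketch, not part of the driver: rxq_tailp always points
 * at the location that receives the next fragment (first rxq_head, then
 * the previous mbuf's m_next), so appending is O(1) with no special
 * case for an empty chain:
 *
 *	WM_RXCHAIN_RESET(rxq);		rxq_tailp = &rxq_head
 *	WM_RXCHAIN_LINK(rxq, m0);	rxq_head = m0
 *	WM_RXCHAIN_LINK(rxq, m1);	m0->m_next = m1
 */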

#ifdef WM_EVENT_COUNTERS
#define	WM_EVCNT_INCR(ev)	(ev)->ev_count++
#define	WM_EVCNT_ADD(ev, val)	(ev)->ev_count += (val)

#define WM_Q_EVCNT_INCR(qname, evname)			\
	WM_EVCNT_INCR(&(qname)->qname##_ev_##evname)
#define WM_Q_EVCNT_ADD(qname, evname, val)		\
	WM_EVCNT_ADD(&(qname)->qname##_ev_##evname, (val))
#else /* !WM_EVENT_COUNTERS */
#define	WM_EVCNT_INCR(ev)	/* nothing */
#define	WM_EVCNT_ADD(ev, val)	/* nothing */

#define WM_Q_EVCNT_INCR(qname, evname)		/* nothing */
#define WM_Q_EVCNT_ADD(qname, evname, val)	/* nothing */
#endif /* !WM_EVENT_COUNTERS */

#define	CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define	CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
#define	CSR_WRITE_FLUSH(sc)						\
	(void) CSR_READ((sc), WMREG_STATUS)
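
/*
 * Illustrative sketch, not part of the driver: PCI writes may be
 * posted, so reading any register (STATUS here) forces them out to the
 * chip.  A typical reset sequence looks like:
 *
 *	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_RST);
 *	CSR_WRITE_FLUSH(sc);
 *	delay(10000);
 */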

#define ICH8_FLASH_READ32(sc, reg)					\
	bus_space_read_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE32(sc, reg, data)				\
	bus_space_write_4((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define ICH8_FLASH_READ16(sc, reg)					\
	bus_space_read_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset)
#define ICH8_FLASH_WRITE16(sc, reg, data)				\
	bus_space_write_2((sc)->sc_flasht, (sc)->sc_flashh,		\
	    (reg) + sc->sc_flashreg_offset, (data))

#define	WM_CDTXADDR(txq, x)	((txq)->txq_desc_dma + WM_CDTXOFF((txq), (x)))
#define	WM_CDRXADDR(rxq, x)	((rxq)->rxq_desc_dma + WM_CDRXOFF((rxq), (x)))

#define	WM_CDTXADDR_LO(txq, x)	(WM_CDTXADDR((txq), (x)) & 0xffffffffU)
#define	WM_CDTXADDR_HI(txq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDTXADDR((txq), (x)) >> 32 : 0)

#define	WM_CDRXADDR_LO(rxq, x)	(WM_CDRXADDR((rxq), (x)) & 0xffffffffU)
#define	WM_CDRXADDR_HI(rxq, x)						\
	(sizeof(bus_addr_t) == 8 ?					\
	 (uint64_t)WM_CDRXADDR((rxq), (x)) >> 32 : 0)
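
/*
 * Illustrative sketch, not part of the driver: a descriptor's DMA
 * address is the ring base plus WM_CDTXOFF(), and is split for the
 * chip's 32-bit base-address register pairs:
 *
 *	bus_addr_t a = WM_CDTXADDR(txq, 5);	base + 5 * txq_descsize
 *	uint32_t lo = WM_CDTXADDR_LO(txq, 5);	a & 0xffffffffU
 *	uint32_t hi = WM_CDTXADDR_HI(txq, 5);	a >> 32 (0 on 32-bit)
 */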

/*
 * Register read/write functions other than CSR_{READ|WRITE}().
 */
#if 0
static inline uint32_t wm_io_read(struct wm_softc *, int);
#endif
static inline void wm_io_write(struct wm_softc *, int, uint32_t);
static inline void wm_82575_write_8bit_ctlr_reg(struct wm_softc *, uint32_t,
    uint32_t, uint32_t);
static inline void wm_set_dma_addr(volatile wiseman_addr_t *, bus_addr_t);

/*
 * Descriptor sync/init functions.
 */
static inline void wm_cdtxsync(struct wm_txqueue *, int, int, int);
static inline void wm_cdrxsync(struct wm_rxqueue *, int, int);
static inline void wm_init_rxdesc(struct wm_rxqueue *, int);

/*
 * Device driver interface functions and commonly used functions.
 * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
 */
static const struct wm_product *wm_lookup(const struct pci_attach_args *);
static int	wm_match(device_t, cfdata_t, void *);
static void	wm_attach(device_t, device_t, void *);
static int	wm_detach(device_t, int);
static bool	wm_suspend(device_t, const pmf_qual_t *);
static bool	wm_resume(device_t, const pmf_qual_t *);
static void	wm_watchdog(struct ifnet *);
static void	wm_watchdog_txq(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_watchdog_txq_locked(struct ifnet *, struct wm_txqueue *,
    uint16_t *);
static void	wm_tick(void *);
static int	wm_ifflags_cb(struct ethercom *);
static int	wm_ioctl(struct ifnet *, u_long, void *);
/* MAC address related */
static uint16_t	wm_check_alt_mac_addr(struct wm_softc *);
static int	wm_read_mac_addr(struct wm_softc *, uint8_t *);
static void	wm_set_ral(struct wm_softc *, const uint8_t *, int);
static uint32_t	wm_mchash(struct wm_softc *, const uint8_t *);
static void	wm_set_filter(struct wm_softc *);
/* Reset and init related */
static void	wm_set_vlan(struct wm_softc *);
static void	wm_set_pcie_completion_timeout(struct wm_softc *);
static void	wm_get_auto_rd_done(struct wm_softc *);
static void	wm_lan_init_done(struct wm_softc *);
static void	wm_get_cfg_done(struct wm_softc *);
static void	wm_phy_post_reset(struct wm_softc *);
static void	wm_write_smbus_addr(struct wm_softc *);
static void	wm_init_lcd_from_nvm(struct wm_softc *);
static void	wm_initialize_hardware_bits(struct wm_softc *);
static uint32_t	wm_rxpbs_adjust_82580(uint32_t);
static void	wm_reset_phy(struct wm_softc *);
static void	wm_flush_desc_rings(struct wm_softc *);
static void	wm_reset(struct wm_softc *);
static int	wm_add_rxbuf(struct wm_rxqueue *, int);
static void	wm_rxdrain(struct wm_rxqueue *);
static void	wm_init_rss(struct wm_softc *);
static void	wm_adjust_qnum(struct wm_softc *, int);
static inline bool	wm_is_using_msix(struct wm_softc *);
static inline bool	wm_is_using_multiqueue(struct wm_softc *);
static int	wm_softint_establish(struct wm_softc *, int, int);
static int	wm_setup_legacy(struct wm_softc *);
static int	wm_setup_msix(struct wm_softc *);
static int	wm_init(struct ifnet *);
static int	wm_init_locked(struct ifnet *);
static void	wm_unset_stopping_flags(struct wm_softc *);
static void	wm_set_stopping_flags(struct wm_softc *);
static void	wm_stop(struct ifnet *, int);
static void	wm_stop_locked(struct ifnet *, int);
static void	wm_dump_mbuf_chain(struct wm_softc *, struct mbuf *);
static void	wm_82547_txfifo_stall(void *);
static int	wm_82547_txfifo_bugchk(struct wm_softc *, struct mbuf *);
static void	wm_itrs_writereg(struct wm_softc *, struct wm_queue *);
/* DMA related */
static int	wm_alloc_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_descs(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_alloc_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_descs(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_rx_regs(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_free_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static void	wm_init_tx_buffer(struct wm_softc *, struct wm_txqueue *);
static int	wm_alloc_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_free_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static int	wm_init_rx_buffer(struct wm_softc *, struct wm_rxqueue *);
static void	wm_init_tx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_txqueue *);
static int	wm_init_rx_queue(struct wm_softc *, struct wm_queue *,
    struct wm_rxqueue *);
static int	wm_alloc_txrx_queues(struct wm_softc *);
static void	wm_free_txrx_queues(struct wm_softc *);
static int	wm_init_txrx_queues(struct wm_softc *);
/* Start */
static int	wm_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint8_t *);
static inline int	wm_select_txqueue(struct ifnet *, struct mbuf *);
static void	wm_start(struct ifnet *);
static void	wm_start_locked(struct ifnet *);
static int	wm_transmit(struct ifnet *, struct mbuf *);
static void	wm_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static int	wm_nq_tx_offload(struct wm_softc *, struct wm_txqueue *,
    struct wm_txsoft *, uint32_t *, uint32_t *, bool *);
static void	wm_nq_start(struct ifnet *);
static void	wm_nq_start_locked(struct ifnet *);
static int	wm_nq_transmit(struct ifnet *, struct mbuf *);
static void	wm_nq_transmit_locked(struct ifnet *, struct wm_txqueue *);
static void	wm_nq_send_common_locked(struct ifnet *, struct wm_txqueue *,
    bool);
static void	wm_deferred_start_locked(struct wm_txqueue *);
static void	wm_handle_queue(void *);
/* Interrupt */
static bool	wm_txeof(struct wm_txqueue *, u_int);
static bool	wm_rxeof(struct wm_rxqueue *, u_int);
static void	wm_linkintr_gmii(struct wm_softc *, uint32_t);
static void	wm_linkintr_tbi(struct wm_softc *, uint32_t);
static void	wm_linkintr_serdes(struct wm_softc *, uint32_t);
static void	wm_linkintr(struct wm_softc *, uint32_t);
static int	wm_intr_legacy(void *);
static inline void	wm_txrxintr_disable(struct wm_queue *);
static inline void	wm_txrxintr_enable(struct wm_queue *);
static void	wm_itrs_calculate(struct wm_softc *, struct wm_queue *);
static int	wm_txrxintr_msix(void *);
static int	wm_linkintr_msix(void *);

/*
 * Media related.
 * GMII, SGMII, TBI, SERDES and SFP.
 */
/* Common */
static void	wm_tbi_serdes_set_linkled(struct wm_softc *);
/* GMII related */
static void	wm_gmii_reset(struct wm_softc *);
static void	wm_gmii_setup_phytype(struct wm_softc *, uint32_t, uint16_t);
static int	wm_get_phy_id_82575(struct wm_softc *);
static void	wm_gmii_mediainit(struct wm_softc *, pci_product_id_t);
static int	wm_gmii_mediachange(struct ifnet *);
static void	wm_gmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_i82543_mii_sendbits(struct wm_softc *, uint32_t, int);
static uint32_t	wm_i82543_mii_recvbits(struct wm_softc *);
static int	wm_gmii_i82543_readreg(device_t, int, int);
static void	wm_gmii_i82543_writereg(device_t, int, int, int);
static int	wm_gmii_mdic_readreg(device_t, int, int);
static void	wm_gmii_mdic_writereg(device_t, int, int, int);
static int	wm_gmii_i82544_readreg(device_t, int, int);
static void	wm_gmii_i82544_writereg(device_t, int, int, int);
static int	wm_gmii_i80003_readreg(device_t, int, int);
static void	wm_gmii_i80003_writereg(device_t, int, int, int);
static int	wm_gmii_bm_readreg(device_t, int, int);
static void	wm_gmii_bm_writereg(device_t, int, int, int);
static void	wm_access_phy_wakeup_reg_bm(device_t, int, int16_t *, int);
static int	wm_gmii_hv_readreg(device_t, int, int);
static int	wm_gmii_hv_readreg_locked(device_t, int, int);
static void	wm_gmii_hv_writereg(device_t, int, int, int);
static void	wm_gmii_hv_writereg_locked(device_t, int, int, int);
static int	wm_gmii_82580_readreg(device_t, int, int);
static void	wm_gmii_82580_writereg(device_t, int, int, int);
static int	wm_gmii_gs40g_readreg(device_t, int, int);
static void	wm_gmii_gs40g_writereg(device_t, int, int, int);
static void	wm_gmii_statchg(struct ifnet *);
/*
 * Kumeran related (80003, ICH* and PCH*).
 * These functions are not for accessing MII registers but for accessing
 * Kumeran-specific registers.
 */
static int	wm_kmrn_readreg(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_readreg_locked(struct wm_softc *, int, uint16_t *);
static int	wm_kmrn_writereg(struct wm_softc *, int, uint16_t);
static int	wm_kmrn_writereg_locked(struct wm_softc *, int, uint16_t);
/* SGMII */
static bool	wm_sgmii_uses_mdio(struct wm_softc *);
static int	wm_sgmii_readreg(device_t, int, int);
static void	wm_sgmii_writereg(device_t, int, int, int);
/* TBI related */
static void	wm_tbi_mediainit(struct wm_softc *);
static int	wm_tbi_mediachange(struct ifnet *);
static void	wm_tbi_mediastatus(struct ifnet *, struct ifmediareq *);
static int	wm_check_for_link(struct wm_softc *);
static void	wm_tbi_tick(struct wm_softc *);
/* SERDES related */
static void	wm_serdes_power_up_link_82575(struct wm_softc *);
static int	wm_serdes_mediachange(struct ifnet *);
static void	wm_serdes_mediastatus(struct ifnet *, struct ifmediareq *);
static void	wm_serdes_tick(struct wm_softc *);
/* SFP related */
static int	wm_sfp_read_data_byte(struct wm_softc *, uint16_t, uint8_t *);
static uint32_t	wm_sfp_get_media_type(struct wm_softc *);

/*
 * NVM related.
 * Microwire, SPI (w/wo EERD) and Flash.
 */
/* Misc functions */
static void	wm_eeprom_sendbits(struct wm_softc *, uint32_t, int);
static void	wm_eeprom_recvbits(struct wm_softc *, uint32_t *, int);
static int	wm_nvm_set_addrbits_size_eecd(struct wm_softc *);
/* Microwire */
static int	wm_nvm_read_uwire(struct wm_softc *, int, int, uint16_t *);
/* SPI */
static int	wm_nvm_ready_spi(struct wm_softc *);
static int	wm_nvm_read_spi(struct wm_softc *, int, int, uint16_t *);
/* Used with EERD */
static int	wm_poll_eerd_eewr_done(struct wm_softc *, int);
static int	wm_nvm_read_eerd(struct wm_softc *, int, int, uint16_t *);
/* Flash */
static int	wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *,
    unsigned int *);
static int32_t	wm_ich8_cycle_init(struct wm_softc *);
static int32_t	wm_ich8_flash_cycle(struct wm_softc *, uint32_t);
static int32_t	wm_read_ich8_data(struct wm_softc *, uint32_t, uint32_t,
    uint32_t *);
static int32_t	wm_read_ich8_byte(struct wm_softc *, uint32_t, uint8_t *);
static int32_t	wm_read_ich8_word(struct wm_softc *, uint32_t, uint16_t *);
static int32_t	wm_read_ich8_dword(struct wm_softc *, uint32_t, uint32_t *);
static int	wm_nvm_read_ich8(struct wm_softc *, int, int, uint16_t *);
static int	wm_nvm_read_spt(struct wm_softc *, int, int, uint16_t *);
/* iNVM */
static int	wm_nvm_read_word_invm(struct wm_softc *, uint16_t, uint16_t *);
static int	wm_nvm_read_invm(struct wm_softc *, int, int, uint16_t *);
/* Lock, detect NVM type, validate checksum and read */
static int	wm_nvm_is_onboard_eeprom(struct wm_softc *);
static int	wm_nvm_flash_presence_i210(struct wm_softc *);
static int	wm_nvm_validate_checksum(struct wm_softc *);
static void	wm_nvm_version_invm(struct wm_softc *);
static void	wm_nvm_version(struct wm_softc *);
static int	wm_nvm_read(struct wm_softc *, int, int, uint16_t *);

/*
 * Hardware semaphores.
 * Very complex...
 */
static int	wm_get_null(struct wm_softc *);
static void	wm_put_null(struct wm_softc *);
static int	wm_get_eecd(struct wm_softc *);
static void	wm_put_eecd(struct wm_softc *);
static int	wm_get_swsm_semaphore(struct wm_softc *); /* 8257[123] */
static void	wm_put_swsm_semaphore(struct wm_softc *);
static int	wm_get_swfw_semaphore(struct wm_softc *, uint16_t);
static void	wm_put_swfw_semaphore(struct wm_softc *, uint16_t);
static int	wm_get_nvm_80003(struct wm_softc *);
static void	wm_put_nvm_80003(struct wm_softc *);
static int	wm_get_nvm_82571(struct wm_softc *);
static void	wm_put_nvm_82571(struct wm_softc *);
static int	wm_get_phy_82575(struct wm_softc *);
static void	wm_put_phy_82575(struct wm_softc *);
static int	wm_get_swfwhw_semaphore(struct wm_softc *); /* For 574/583 */
static void	wm_put_swfwhw_semaphore(struct wm_softc *);
static int	wm_get_swflag_ich8lan(struct wm_softc *);	/* For PHY */
static void	wm_put_swflag_ich8lan(struct wm_softc *);
static int	wm_get_nvm_ich8lan(struct wm_softc *);
static void	wm_put_nvm_ich8lan(struct wm_softc *);
static int	wm_get_hw_semaphore_82573(struct wm_softc *);
static void	wm_put_hw_semaphore_82573(struct wm_softc *);

/*
 * Management mode and power management related subroutines.
 * BMC, AMT, suspend/resume and EEE.
 */
#if 0
static int	wm_check_mng_mode(struct wm_softc *);
static int	wm_check_mng_mode_ich8lan(struct wm_softc *);
static int	wm_check_mng_mode_82574(struct wm_softc *);
static int	wm_check_mng_mode_generic(struct wm_softc *);
#endif
static int	wm_enable_mng_pass_thru(struct wm_softc *);
static bool	wm_phy_resetisblocked(struct wm_softc *);
static void	wm_get_hw_control(struct wm_softc *);
static void	wm_release_hw_control(struct wm_softc *);
static void	wm_gate_hw_phy_config_ich8lan(struct wm_softc *, bool);
static void	wm_smbustopci(struct wm_softc *);
static void	wm_init_manageability(struct wm_softc *);
static void	wm_release_manageability(struct wm_softc *);
static void	wm_get_wakeup(struct wm_softc *);
static void	wm_ulp_disable(struct wm_softc *);
static void	wm_enable_phy_wakeup(struct wm_softc *);
static void	wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *);
static void	wm_enable_wakeup(struct wm_softc *);
static void	wm_disable_aspm(struct wm_softc *);
/* LPLU (Low Power Link Up) */
static void	wm_lplu_d0_disable(struct wm_softc *);
/* EEE */
static void	wm_set_eee_i350(struct wm_softc *);

/*
 * Workarounds (mainly PHY related).
 * Basically, PHY workarounds are in the PHY drivers.
 */
static void	wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *);
static void	wm_gig_downshift_workaround_ich8lan(struct wm_softc *);
static void	wm_hv_phy_workaround_ich8lan(struct wm_softc *);
static void	wm_lv_phy_workaround_ich8lan(struct wm_softc *);
static int	wm_k1_gig_workaround_hv(struct wm_softc *, int);
static void	wm_set_mdio_slow_mode_hv(struct wm_softc *);
static void	wm_configure_k1_ich8lan(struct wm_softc *, int);
static void	wm_reset_init_script_82575(struct wm_softc *);
static void	wm_reset_mdicnfg_82580(struct wm_softc *);
static bool	wm_phy_is_accessible_pchlan(struct wm_softc *);
static void	wm_toggle_lanphypc_pch_lpt(struct wm_softc *);
static int	wm_platform_pm_pch_lpt(struct wm_softc *, bool);
static void	wm_pll_workaround_i210(struct wm_softc *);
static void	wm_legacy_irq_quirk_spt(struct wm_softc *);

CFATTACH_DECL3_NEW(wm, sizeof(struct wm_softc),
    wm_match, wm_attach, wm_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);

/*
 * Devices supported by this driver.
 */
static const struct wm_product {
	pci_vendor_id_t		wmp_vendor;
	pci_product_id_t	wmp_product;
	const char		*wmp_name;
	wm_chip_type		wmp_type;
	uint32_t		wmp_flags;
#define	WMP_F_UNKNOWN		WM_MEDIATYPE_UNKNOWN
#define	WMP_F_FIBER		WM_MEDIATYPE_FIBER
#define	WMP_F_COPPER		WM_MEDIATYPE_COPPER
#define	WMP_F_SERDES		WM_MEDIATYPE_SERDES
#define WMP_MEDIATYPE(x)	((x) & 0x03)
} wm_products[] = {
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82542,
	  "Intel i82542 1000BASE-X Ethernet",
	  WM_T_82542_2_1,	WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_FIBER,
	  "Intel i82543GC 1000BASE-X Ethernet",
	  WM_T_82543,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82543GC_COPPER,
	  "Intel i82543GC 1000BASE-T Ethernet",
	  WM_T_82543,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_COPPER,
	  "Intel i82544EI 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544EI_FIBER,
	  "Intel i82544EI 1000BASE-X Ethernet",
	  WM_T_82544,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_COPPER,
	  "Intel i82544GC 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82544GC_LOM,
	  "Intel i82544GC (LOM) 1000BASE-T Ethernet",
	  WM_T_82544,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM,
	  "Intel i82540EM 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EM_LOM,
	  "Intel i82540EM (LOM) 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LOM,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82540EP_LP,
	  "Intel i82540EP 1000BASE-T Ethernet",
	  WM_T_82540,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_COPPER,
	  "Intel i82545EM 1000BASE-T Ethernet",
	  WM_T_82545,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_COPPER,
	  "Intel i82545GM 1000BASE-T Ethernet",
	  WM_T_82545_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_FIBER,
	  "Intel i82545GM 1000BASE-X Ethernet",
	  WM_T_82545_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545GM_SERDES,
	  "Intel i82545GM Gigabit Ethernet (SERDES)",
	  WM_T_82545_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_COPPER,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_QUAD,
	  "Intel i82546EB 1000BASE-T Ethernet",
	  WM_T_82546,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82545EM_FIBER,
	  "Intel i82545EM 1000BASE-X Ethernet",
	  WM_T_82545,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546EB_FIBER,
	  "Intel i82546EB 1000BASE-X Ethernet",
	  WM_T_82546,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_COPPER,
	  "Intel i82546GB 1000BASE-T Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_FIBER,
	  "Intel i82546GB 1000BASE-X Ethernet",
	  WM_T_82546_3,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_SERDES,
	  "Intel i82546GB Gigabit Ethernet (SERDES)",
	  WM_T_82546_3,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER,
	  "i82546GB quad-port Gigabit Ethernet",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_QUAD_COPPER_KSP3,
	  "i82546GB quad-port Gigabit Ethernet (KSP3)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82546GB_PCIE,
	  "Intel PRO/1000MT (82546GB)",
	  WM_T_82546_3,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI,
	  "Intel i82541EI 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER_LOM,
	  "Intel i82541ER (LOM) 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541EI_MOBILE,
	  "Intel i82541EI Mobile 1000BASE-T Ethernet",
	  WM_T_82541,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541ER,
	  "Intel i82541ER 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI,
	  "Intel i82541GI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541GI_MOBILE,
	  "Intel i82541GI Mobile 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82541PI,
	  "Intel i82541PI 1000BASE-T Ethernet",
	  WM_T_82541_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI,
	  "Intel i82547EI 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547EI_MOBILE,
	  "Intel i82547EI Mobile 1000BASE-T Ethernet",
	  WM_T_82547,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82547GI,
	  "Intel i82547GI 1000BASE-T Ethernet",
	  WM_T_82547_2,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_COPPER,
	  "Intel PRO/1000 PT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_FIBER,
	  "Intel PRO/1000 PF (82571EB)",
	  WM_T_82571,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_SERDES,
	  "Intel PRO/1000 PB (82571EB)",
	  WM_T_82571,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_COPPER,
	  "Intel PRO/1000 QT (82571EB)",
	  WM_T_82571,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571GB_QUAD_COPPER,
	  "Intel PRO/1000 PT Quad Port Server Adapter",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571PT_QUAD_COPPER,
	  "Intel Gigabit PT Quad Port Server ExpressModule",
	  WM_T_82571,		WMP_F_COPPER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_DUAL_SERDES,
	  "Intel 82571EB Dual Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_SERDES,
	  "Intel 82571EB Quad Gigabit Ethernet (SERDES)",
	  WM_T_82571,		WMP_F_SERDES, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82571EB_QUAD_FIBER,
	  "Intel 82571EB Quad 1000baseX Ethernet",
	  WM_T_82571,		WMP_F_FIBER, },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_COPPER,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_FIBER,
	  "Intel i82572EI 1000baseX Ethernet",
	  WM_T_82572,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI_SERDES,
	  "Intel i82572EI Gigabit Ethernet (SERDES)",
	  WM_T_82572,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82572EI,
	  "Intel i82572EI 1000baseT Ethernet",
	  WM_T_82572,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E,
	  "Intel i82573E",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573E_IAMT,
	  "Intel i82573E IAMT",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82573L,
	  "Intel i82573L Gigabit Ethernet",
	  WM_T_82573,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574L,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82574LA,
	  "Intel i82574L",
	  WM_T_82574,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82583V,
	  "Intel i82583V",
	  WM_T_82583,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_DPT,
	  "i80003 dual 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_FIB_DPT,
	  "i80003 dual 1000baseX Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_DPT,
	  "Intel i80003ES2 dual Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_CPR_SPT,
	  "Intel i80003 1000baseT Ethernet",
	  WM_T_80003,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_80K3LAN_SDS_SPT,
	  "Intel i80003 Gigabit Ethernet (SERDES)",
	  WM_T_80003,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_AMT,
	  "Intel i82801H (M_AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_AMT,
	  "Intel i82801H (AMT) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_LAN,
	  "Intel i82801H LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_LAN,
	  "Intel i82801H (IFE) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_M_LAN,
	  "Intel i82801H (M) LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_GT,
	  "Intel i82801H IFE (GT) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_IFE_G,
	  "Intel i82801H IFE (G) 10/100 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801H_82567V_3,
	  "82567V-3 LAN Controller",
	  WM_T_ICH8,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_AMT,
	  "82801I (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE,
	  "82801I 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_G,
	  "82801I (G) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IFE_GT,
	  "82801I (GT) 10/100 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_C,
	  "82801I (C) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M,
	  "82801I mobile LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_V,
	  "82801I mobile (V) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_IGP_M_AMT,
	  "82801I mobile (AMT) LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801I_BM,
	  "82567LM-4 LAN Controller",
	  WM_T_ICH9,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LM,
	  "82567LM-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_LF,
	  "82567LF-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LM,
	  "82567LM-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_LF,
	  "82567LF-3 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_R_BM_V,
	  "82567V-2 LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82801J_D_BM_V,
	  "82567V-3? LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_HANKSVILLE,
	  "HANKSVILLE LAN Controller",
	  WM_T_ICH10,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LM,
	  "PCH LAN (82577LM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_M_LC,
	  "PCH LAN (82577LC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DM,
	  "PCH LAN (82578DM) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH_D_DC,
	  "PCH LAN (82578DC) Controller",
	  WM_T_PCH,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_LM,
	  "PCH2 LAN (82579LM) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_PCH2_LV_V,
	  "PCH2 LAN (82579V) Controller",
	  WM_T_PCH2,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_COPPER,
	  "82575EB dual-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575EB_FIBER_SERDES,
	  "82575EB dual-1000baseX Ethernet (SERDES)",
	  WM_T_82575,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER,
	  "82575GB quad-1000baseT Ethernet",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82575GB_QUAD_COPPER_PM,
	  "82575GB quad-1000baseT Ethernet (PM)",
	  WM_T_82575,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_COPPER,
	  "82576 1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_FIBER,
	  "82576 1000BaseX Ethernet",
	  WM_T_82576,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER,
	  "82576 quad-1000BaseT Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_QUAD_COPPER_ET2,
	  "82576 Gigabit ET2 Quad Port Server Adapter",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS,
	  "82576 gigabit Ethernet",
	  WM_T_82576,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_NS_SERDES,
	  "82576 gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82576_SERDES_QUAD,
	  "82576 quad-gigabit Ethernet (SERDES)",
	  WM_T_82576,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER,
	  "82580 1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_FIBER,
	  "82580 1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SERDES,
	  "82580 1000BaseT Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_SGMII,
	  "82580 gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_COPPER_DUAL,
	  "82580 dual-1000BaseT Ethernet",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_82580_QUAD_FIBER,
	  "82580 quad-1000BaseX Ethernet",
	  WM_T_82580,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SGMII,
	  "DH89XXCC Gigabit Ethernet (SGMII)",
	  WM_T_82580,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SERDES,
	  "DH89XXCC Gigabit Ethernet (SERDES)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_BPLANE,
	  "DH89XXCC 1000BASE-KX Ethernet",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_DH89XXCC_SFP,
	  "DH89XXCC Gigabit Ethernet (SFP)",
	  WM_T_82580,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_COPPER,
	  "I350 Gigabit Network Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_FIBER,
	  "I350 Gigabit Fiber Network Connection",
	  WM_T_I350,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SERDES,
	  "I350 Gigabit Backplane Connection",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_DA4,
	  "I350 Quad Port Gigabit Ethernet",
	  WM_T_I350,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I350_SGMII,
	  "I350 Gigabit Connection",
	  WM_T_I350,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_1000KX,
	  "I354 Gigabit Ethernet (KX)",
	  WM_T_I354,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_SGMII,
	  "I354 Gigabit Ethernet (SGMII)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_C2000_25GBE,
	  "I354 Gigabit Ethernet (2.5G)",
	  WM_T_I354,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_T1,
	  "I210-T1 Ethernet Server Adapter",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_OEM1,
	  "I210 Ethernet (Copper OEM)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_IT,
	  "I210 Ethernet (Copper IT)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_COPPER_WOF,
	  "I210 Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_FIBER,
	  "I210 Gigabit Ethernet (Fiber)",
	  WM_T_I210,		WMP_F_FIBER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES,
	  "I210 Gigabit Ethernet (SERDES)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SERDES_WOF,
	  "I210 Gigabit Ethernet (FLASH less)",
	  WM_T_I210,		WMP_F_SERDES },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I210_SGMII,
	  "I210 Gigabit Ethernet (SGMII)",
	  WM_T_I210,		WMP_F_COPPER },

	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I211_COPPER,
	  "I211 Ethernet (COPPER)",
	  WM_T_I211,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_V,
	  "I217 V Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I217_LM,
	  "I217 LM Ethernet Connection",
	  WM_T_PCH_LPT,		WMP_F_COPPER },
	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V,
1475 	  "I218 V Ethernet Connection",
1476 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1477 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V2,
1478 	  "I218 V Ethernet Connection",
1479 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1480 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_V3,
1481 	  "I218 V Ethernet Connection",
1482 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1483 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM,
1484 	  "I218 LM Ethernet Connection",
1485 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1486 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM2,
1487 	  "I218 LM Ethernet Connection",
1488 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1489 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I218_LM3,
1490 	  "I218 LM Ethernet Connection",
1491 	  WM_T_PCH_LPT,		WMP_F_COPPER },
1492 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V,
1493 	  "I219 V Ethernet Connection",
1494 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1495 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V2,
1496 	  "I219 V Ethernet Connection",
1497 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1498 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V4,
1499 	  "I219 V Ethernet Connection",
1500 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1501 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V5,
1502 	  "I219 V Ethernet Connection",
1503 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1504 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM,
1505 	  "I219 LM Ethernet Connection",
1506 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1507 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM2,
1508 	  "I219 LM Ethernet Connection",
1509 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1510 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM3,
1511 	  "I219 LM Ethernet Connection",
1512 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1513 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM4,
1514 	  "I219 LM Ethernet Connection",
1515 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1516 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM5,
1517 	  "I219 LM Ethernet Connection",
1518 	  WM_T_PCH_SPT,		WMP_F_COPPER },
1519 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V6,
1520 	  "I219 V Ethernet Connection",
1521 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1522 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_V7,
1523 	  "I219 V Ethernet Connection",
1524 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1525 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM6,
1526 	  "I219 LM Ethernet Connection",
1527 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1528 	{ PCI_VENDOR_INTEL,	PCI_PRODUCT_INTEL_I219_LM7,
1529 	  "I219 LM Ethernet Connection",
1530 	  WM_T_PCH_CNP,		WMP_F_COPPER },
1531 	{ 0,			0,
1532 	  NULL,
1533 	  0,			0 },
1534 };
1535 
1536 /*
1537  * Register read/write functions,
1538  * other than CSR_{READ|WRITE}().
1539  */
1540 
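/*
 * wm_io_read()/wm_io_write() use the I/O-mapped indirect access window:
 * the target register offset is written at I/O BAR offset 0 (commonly
 * called IOADDR in the Intel documentation) and the data is then read
 * or written at offset 4 (IODATA).
 */
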
1541 #if 0 /* Not currently used */
1542 static inline uint32_t
1543 wm_io_read(struct wm_softc *sc, int reg)
1544 {
1545 
1546 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1547 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, 4));
1548 }
1549 #endif
1550 
1551 static inline void
1552 wm_io_write(struct wm_softc *sc, int reg, uint32_t val)
1553 {
1554 
1555 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 0, reg);
1556 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, 4, val);
1557 }
1558 
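/*
 * Write an 8-bit value through an 82575 SCTL-style control register:
 * the data and the target offset are combined into a single CSR write,
 * then the register is polled (5us per iteration, up to
 * SCTL_CTL_POLL_TIMEOUT tries) until the chip reports SCTL_CTL_READY.
 */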
1559 static inline void
1560 wm_82575_write_8bit_ctlr_reg(struct wm_softc *sc, uint32_t reg, uint32_t off,
1561     uint32_t data)
1562 {
1563 	uint32_t regval;
1564 	int i;
1565 
1566 	regval = (data & SCTL_CTL_DATA_MASK) | (off << SCTL_CTL_ADDR_SHIFT);
1567 
1568 	CSR_WRITE(sc, reg, regval);
1569 
1570 	for (i = 0; i < SCTL_CTL_POLL_TIMEOUT; i++) {
1571 		delay(5);
1572 		if (CSR_READ(sc, reg) & SCTL_CTL_READY)
1573 			break;
1574 	}
1575 	if (i == SCTL_CTL_POLL_TIMEOUT) {
1576 		aprint_error("%s: WARNING:"
1577 		    " i82575 reg 0x%08x setup did not indicate ready\n",
1578 		    device_xname(sc->sc_dev), reg);
1579 	}
1580 }
1581 
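/*
 * Split a bus address into the two little-endian 32-bit words of a
 * wiseman_addr_t. For example, a (hypothetical) 64-bit address of
 * 0x0000000123456780 yields wa_low = htole32(0x23456780) and
 * wa_high = htole32(0x00000001); with a 32-bit bus_addr_t the high
 * word is always zero.
 */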
1582 static inline void
1583 wm_set_dma_addr(volatile wiseman_addr_t *wa, bus_addr_t v)
1584 {
1585 	wa->wa_low = htole32(v & 0xffffffffU);
1586 	if (sizeof(bus_addr_t) == 8)
1587 		wa->wa_high = htole32((uint64_t) v >> 32);
1588 	else
1589 		wa->wa_high = 0;
1590 }
1591 
1592 /*
1593  * Descriptor sync/init functions.
1594  */
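/*
 * wm_cdtxsync() handles ring wrap-around by splitting the sync in two.
 * A worked example with hypothetical values: if WM_NTXDESC(txq) == 256,
 * start == 250 and num == 10, the first bus_dmamap_sync() covers
 * descriptors 250..255 and the second covers descriptors 0..3.
 */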
1595 static inline void
1596 wm_cdtxsync(struct wm_txqueue *txq, int start, int num, int ops)
1597 {
1598 	struct wm_softc *sc = txq->txq_sc;
1599 
1600 	/* If it will wrap around, sync to the end of the ring. */
1601 	if ((start + num) > WM_NTXDESC(txq)) {
1602 		bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1603 		    WM_CDTXOFF(txq, start), txq->txq_descsize *
1604 		    (WM_NTXDESC(txq) - start), ops);
1605 		num -= (WM_NTXDESC(txq) - start);
1606 		start = 0;
1607 	}
1608 
1609 	/* Now sync whatever is left. */
1610 	bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_dmamap,
1611 	    WM_CDTXOFF(txq, start), txq->txq_descsize * num, ops);
1612 }
1613 
1614 static inline void
1615 wm_cdrxsync(struct wm_rxqueue *rxq, int start, int ops)
1616 {
1617 	struct wm_softc *sc = rxq->rxq_sc;
1618 
1619 	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_dmamap,
1620 	    WM_CDRXOFF(rxq, start), rxq->rxq_descsize, ops);
1621 }
1622 
1623 static inline void
1624 wm_init_rxdesc(struct wm_rxqueue *rxq, int start)
1625 {
1626 	struct wm_softc *sc = rxq->rxq_sc;
1627 	struct wm_rxsoft *rxs = &rxq->rxq_soft[start];
1628 	struct mbuf *m = rxs->rxs_mbuf;
1629 
1630 	/*
1631 	 * Note: We scoot the packet forward 2 bytes in the buffer
1632 	 * so that the payload after the Ethernet header is aligned
1633 	 * to a 4-byte boundary.
1634 	 *
1635 	 * XXX BRAINDAMAGE ALERT!
1636 	 * The stupid chip uses the same size for every buffer, which
1637 	 * is set in the Receive Control register.  We are using the 2K
1638 	 * size option, but what we REALLY want is (2K - 2)!  For this
1639 	 * reason, we can't "scoot" packets longer than the standard
1640 	 * Ethernet MTU.  On strict-alignment platforms, if the total
1641 	 * size exceeds (2K - 2) we set align_tweak to 0 and let
1642 	 * the upper layer copy the headers.
1643 	 */
1644 	m->m_data = m->m_ext.ext_buf + sc->sc_align_tweak;
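	/*
	 * With the usual align_tweak of 2, the 14-byte Ethernet header
	 * ends at buffer offset 16, so the payload that follows starts
	 * on a 4-byte boundary.
	 */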
1645 
1646 	if (sc->sc_type == WM_T_82574) {
1647 		ext_rxdesc_t *rxd = &rxq->rxq_ext_descs[start];
1648 		rxd->erx_data.erxd_addr =
1649 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1650 		rxd->erx_data.erxd_dd = 0;
1651 	} else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
1652 		nq_rxdesc_t *rxd = &rxq->rxq_nq_descs[start];
1653 
1654 		rxd->nqrx_data.nrxd_paddr =
1655 		    htole64(rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1656 		/* Currently, split header is not supported. */
1657 		rxd->nqrx_data.nrxd_haddr = 0;
1658 	} else {
1659 		wiseman_rxdesc_t *rxd = &rxq->rxq_descs[start];
1660 
1661 		wm_set_dma_addr(&rxd->wrx_addr,
1662 		    rxs->rxs_dmamap->dm_segs[0].ds_addr + sc->sc_align_tweak);
1663 		rxd->wrx_len = 0;
1664 		rxd->wrx_cksum = 0;
1665 		rxd->wrx_status = 0;
1666 		rxd->wrx_errors = 0;
1667 		rxd->wrx_special = 0;
1668 	}
1669 	wm_cdrxsync(rxq, start, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1670 
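	/*
	 * Advance the receive descriptor tail (RDT) to this slot, handing
	 * ownership of the freshly initialized descriptor back to the chip.
	 */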
1671 	CSR_WRITE(sc, rxq->rxq_rdt_reg, start);
1672 }
1673 
1674 /*
1675  * Device driver interface functions and commonly used functions.
1676  * match, attach, detach, init, start, stop, ioctl, watchdog and so on.
1677  */
1678 
1679 /* Lookup supported device table */
1680 static const struct wm_product *
1681 wm_lookup(const struct pci_attach_args *pa)
1682 {
1683 	const struct wm_product *wmp;
1684 
1685 	for (wmp = wm_products; wmp->wmp_name != NULL; wmp++) {
1686 		if (PCI_VENDOR(pa->pa_id) == wmp->wmp_vendor &&
1687 		    PCI_PRODUCT(pa->pa_id) == wmp->wmp_product)
1688 			return wmp;
1689 	}
1690 	return NULL;
1691 }
1692 
1693 /* The match function (ca_match) */
1694 static int
1695 wm_match(device_t parent, cfdata_t cf, void *aux)
1696 {
1697 	struct pci_attach_args *pa = aux;
1698 
1699 	if (wm_lookup(pa) != NULL)
1700 		return 1;
1701 
1702 	return 0;
1703 }
1704 
1705 /* The attach function (ca_attach) */
1706 static void
1707 wm_attach(device_t parent, device_t self, void *aux)
1708 {
1709 	struct wm_softc *sc = device_private(self);
1710 	struct pci_attach_args *pa = aux;
1711 	prop_dictionary_t dict;
1712 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1713 	pci_chipset_tag_t pc = pa->pa_pc;
1714 	int counts[PCI_INTR_TYPE_SIZE];
1715 	pci_intr_type_t max_type;
1716 	const char *eetype, *xname;
1717 	bus_space_tag_t memt;
1718 	bus_space_handle_t memh;
1719 	bus_size_t memsize;
1720 	int memh_valid;
1721 	int i, error;
1722 	const struct wm_product *wmp;
1723 	prop_data_t ea;
1724 	prop_number_t pn;
1725 	uint8_t enaddr[ETHER_ADDR_LEN];
1726 	char buf[256];
1727 	uint16_t cfg1, cfg2, swdpin, nvmword;
1728 	pcireg_t preg, memtype;
1729 	uint16_t eeprom_data, apme_mask;
1730 	bool force_clear_smbi;
1731 	uint32_t link_mode;
1732 	uint32_t reg;
1733 
1734 	sc->sc_dev = self;
1735 	callout_init(&sc->sc_tick_ch, CALLOUT_FLAGS);
1736 	sc->sc_core_stopping = false;
1737 
1738 	wmp = wm_lookup(pa);
1739 #ifdef DIAGNOSTIC
1740 	if (wmp == NULL) {
1741 		printf("\n");
1742 		panic("wm_attach: impossible");
1743 	}
1744 #endif
1745 	sc->sc_mediatype = WMP_MEDIATYPE(wmp->wmp_flags);
1746 
1747 	sc->sc_pc = pa->pa_pc;
1748 	sc->sc_pcitag = pa->pa_tag;
1749 
1750 	if (pci_dma64_available(pa))
1751 		sc->sc_dmat = pa->pa_dmat64;
1752 	else
1753 		sc->sc_dmat = pa->pa_dmat;
1754 
1755 	sc->sc_pcidevid = PCI_PRODUCT(pa->pa_id);
1756 	sc->sc_rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag,PCI_CLASS_REG));
1757 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", wmp->wmp_name, 1);
1758 
1759 	sc->sc_type = wmp->wmp_type;
1760 
1761 	/* Set default function pointers */
1762 	sc->phy.acquire = sc->nvm.acquire = wm_get_null;
1763 	sc->phy.release = sc->nvm.release = wm_put_null;
1764 	sc->phy.reset_delay_us = (sc->sc_type >= WM_T_82571) ? 100 : 10000;
1765 
1766 	if (sc->sc_type < WM_T_82543) {
1767 		if (sc->sc_rev < 2) {
1768 			aprint_error_dev(sc->sc_dev,
1769 			    "i82542 must be at least rev. 2\n");
1770 			return;
1771 		}
1772 		if (sc->sc_rev < 3)
1773 			sc->sc_type = WM_T_82542_2_0;
1774 	}
1775 
1776 	/*
1777 	 * Disable MSI for Errata:
1778 	 * "Message Signaled Interrupt Feature May Corrupt Write Transactions"
1779 	 *
1780 	 *  82544: Errata 25
1781 	 *  82540: Errata  6 (easy to reproduce device timeout)
1782 	 *  82545: Errata  4 (easy to reproduce device timeout)
1783 	 *  82546: Errata 26 (easy to reproduce device timeout)
1784 	 *  82541: Errata  7 (easy to reproduce device timeout)
1785 	 *
1786 	 * "Byte Enables 2 and 3 are not set on MSI writes"
1787 	 *
1788 	 *  82571 & 82572: Errata 63
1789 	 */
1790 	if ((sc->sc_type <= WM_T_82541_2) || (sc->sc_type == WM_T_82571)
1791 	    || (sc->sc_type == WM_T_82572))
1792 		pa->pa_flags &= ~PCI_FLAGS_MSI_OKAY;
1793 
1794 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1795 	    || (sc->sc_type == WM_T_82580)
1796 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
1797 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
1798 		sc->sc_flags |= WM_F_NEWQUEUE;
1799 
1800 	/* Set device properties (mactype) */
1801 	dict = device_properties(sc->sc_dev);
1802 	prop_dictionary_set_uint32(dict, "mactype", sc->sc_type);
1803 
1804 	/*
1805 	 * Map the device.  All devices support memory-mapped access,
1806 	 * and it is really required for normal operation.
1807 	 */
1808 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, WM_PCI_MMBA);
1809 	switch (memtype) {
1810 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT:
1811 	case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT:
1812 		memh_valid = (pci_mapreg_map(pa, WM_PCI_MMBA,
1813 			memtype, 0, &memt, &memh, NULL, &memsize) == 0);
1814 		break;
1815 	default:
1816 		memh_valid = 0;
1817 		break;
1818 	}
1819 
1820 	if (memh_valid) {
1821 		sc->sc_st = memt;
1822 		sc->sc_sh = memh;
1823 		sc->sc_ss = memsize;
1824 	} else {
1825 		aprint_error_dev(sc->sc_dev,
1826 		    "unable to map device registers\n");
1827 		return;
1828 	}
1829 
1830 	/*
1831 	 * In addition, i82544 and later support I/O mapped indirect
1832 	 * register access.  It is not desirable (nor supported in
1833 	 * this driver) to use it for normal operation, though it is
1834 	 * required to work around bugs in some chip versions.
1835 	 */
1836 	if (sc->sc_type >= WM_T_82544) {
1837 		/* First we have to find the I/O BAR. */
1838 		for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
1839 			memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, i);
1840 			if (memtype == PCI_MAPREG_TYPE_IO)
1841 				break;
1842 			if (PCI_MAPREG_MEM_TYPE(memtype) ==
1843 			    PCI_MAPREG_MEM_TYPE_64BIT)
1844 				i += 4;	/* skip high bits, too */
1845 		}
1846 		if (i < PCI_MAPREG_END) {
1847 			/*
1848 			 * We found PCI_MAPREG_TYPE_IO. Note that the 82580
1849 			 * (and newer?) chips have no PCI_MAPREG_TYPE_IO.
1850 			 * That's no problem, because newer chips don't have
1851 			 * this bug.
1852 			 *
1853 			 * The i8254x apparently doesn't respond when the
1854 			 * I/O BAR is 0, which looks somewhat like it hasn't
1855 			 * been configured.
1856 			 */
1857 			preg = pci_conf_read(pc, pa->pa_tag, i);
1858 			if (PCI_MAPREG_MEM_ADDR(preg) == 0) {
1859 				aprint_error_dev(sc->sc_dev,
1860 				    "WARNING: I/O BAR at zero.\n");
1861 			} else if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_IO,
1862 					0, &sc->sc_iot, &sc->sc_ioh,
1863 					NULL, &sc->sc_ios) == 0) {
1864 				sc->sc_flags |= WM_F_IOH_VALID;
1865 			} else {
1866 				aprint_error_dev(sc->sc_dev,
1867 				    "WARNING: unable to map I/O space\n");
1868 			}
1869 		}
1870 
1871 	}
1872 
1873 	/* Enable bus mastering.  Disable MWI on the i82542 2.0. */
1874 	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
1875 	preg |= PCI_COMMAND_MASTER_ENABLE;
1876 	if (sc->sc_type < WM_T_82542_2_1)
1877 		preg &= ~PCI_COMMAND_INVALIDATE_ENABLE;
1878 	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);
1879 
1880 	/* power up chip */
1881 	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL))
1882 	    && error != EOPNOTSUPP) {
1883 		aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error);
1884 		return;
1885 	}
1886 
1887 	wm_adjust_qnum(sc, pci_msix_count(pa->pa_pc, pa->pa_tag));
1888 	/*
1889 	 * Don't use MSI-X if we can use only one queue, to save interrupt
1890 	 * resources.
1891 	 */
1892 	if (sc->sc_nqueues > 1) {
1893 		max_type = PCI_INTR_TYPE_MSIX;
1894 		/*
1895 		 * The 82583 has an MSI-X capability in the PCI configuration
1896 		 * space, but it doesn't support it. At least the documentation
1897 		 * doesn't say anything about MSI-X.
1898 		 */
1899 		counts[PCI_INTR_TYPE_MSIX]
1900 		    = (sc->sc_type == WM_T_82583) ? 0 : sc->sc_nqueues + 1;
1901 	} else {
1902 		max_type = PCI_INTR_TYPE_MSI;
1903 		counts[PCI_INTR_TYPE_MSIX] = 0;
1904 	}
1905 
1906 	/* Allocation settings */
1907 	counts[PCI_INTR_TYPE_MSI] = 1;
1908 	counts[PCI_INTR_TYPE_INTX] = 1;
1909 	/* overridden by disable flags */
1910 	if (wm_disable_msi != 0) {
1911 		counts[PCI_INTR_TYPE_MSI] = 0;
1912 		if (wm_disable_msix != 0) {
1913 			max_type = PCI_INTR_TYPE_INTX;
1914 			counts[PCI_INTR_TYPE_MSIX] = 0;
1915 		}
1916 	} else if (wm_disable_msix != 0) {
1917 		max_type = PCI_INTR_TYPE_MSI;
1918 		counts[PCI_INTR_TYPE_MSIX] = 0;
1919 	}
1920 
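/*
 * Interrupt allocation falls back MSI-X -> MSI -> INTx: each failed
 * wm_setup_*() call below releases the vectors it was given, lowers
 * max_type and the counts, and retries from here.
 */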
1921 alloc_retry:
1922 	if (pci_intr_alloc(pa, &sc->sc_intrs, counts, max_type) != 0) {
1923 		aprint_error_dev(sc->sc_dev, "failed to allocate interrupt\n");
1924 		return;
1925 	}
1926 
1927 	if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSIX) {
1928 		error = wm_setup_msix(sc);
1929 		if (error) {
1930 			pci_intr_release(pc, sc->sc_intrs,
1931 			    counts[PCI_INTR_TYPE_MSIX]);
1932 
1933 			/* Setup for MSI: Disable MSI-X */
1934 			max_type = PCI_INTR_TYPE_MSI;
1935 			counts[PCI_INTR_TYPE_MSI] = 1;
1936 			counts[PCI_INTR_TYPE_INTX] = 1;
1937 			goto alloc_retry;
1938 		}
1939 	} else if (pci_intr_type(pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_MSI) {
1940 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1941 		error = wm_setup_legacy(sc);
1942 		if (error) {
1943 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1944 			    counts[PCI_INTR_TYPE_MSI]);
1945 
1946 			/* The next try is for INTx: Disable MSI */
1947 			max_type = PCI_INTR_TYPE_INTX;
1948 			counts[PCI_INTR_TYPE_INTX] = 1;
1949 			goto alloc_retry;
1950 		}
1951 	} else {
1952 		wm_adjust_qnum(sc, 0);	/* must not use multiqueue */
1953 		error = wm_setup_legacy(sc);
1954 		if (error) {
1955 			pci_intr_release(sc->sc_pc, sc->sc_intrs,
1956 			    counts[PCI_INTR_TYPE_INTX]);
1957 			return;
1958 		}
1959 	}
1960 
1961 	/*
1962 	 * Check the function ID (unit number of the chip).
1963 	 */
1964 	if ((sc->sc_type == WM_T_82546) || (sc->sc_type == WM_T_82546_3)
1965 	    || (sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_80003)
1966 	    || (sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
1967 	    || (sc->sc_type == WM_T_82580)
1968 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
1969 		sc->sc_funcid = (CSR_READ(sc, WMREG_STATUS)
1970 		    >> STATUS_FUNCID_SHIFT) & STATUS_FUNCID_MASK;
1971 	else
1972 		sc->sc_funcid = 0;
1973 
1974 	/*
1975 	 * Determine a few things about the bus we're connected to.
1976 	 */
1977 	if (sc->sc_type < WM_T_82543) {
1978 		/* We don't really know the bus characteristics here. */
1979 		sc->sc_bus_speed = 33;
1980 	} else if (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) {
1981 		/*
1982 		 * CSA (Communication Streaming Architecture) is about as fast
1983 		 * as a 32-bit 66MHz PCI bus.
1984 		 */
1985 		sc->sc_flags |= WM_F_CSA;
1986 		sc->sc_bus_speed = 66;
1987 		aprint_verbose_dev(sc->sc_dev,
1988 		    "Communication Streaming Architecture\n");
1989 		if (sc->sc_type == WM_T_82547) {
1990 			callout_init(&sc->sc_txfifo_ch, CALLOUT_FLAGS);
1991 			callout_setfunc(&sc->sc_txfifo_ch,
1992 			    wm_82547_txfifo_stall, sc);
1993 			aprint_verbose_dev(sc->sc_dev,
1994 			    "using 82547 Tx FIFO stall work-around\n");
1995 		}
1996 	} else if (sc->sc_type >= WM_T_82571) {
1997 		sc->sc_flags |= WM_F_PCIE;
1998 		if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
1999 		    && (sc->sc_type != WM_T_ICH10)
2000 		    && (sc->sc_type != WM_T_PCH)
2001 		    && (sc->sc_type != WM_T_PCH2)
2002 		    && (sc->sc_type != WM_T_PCH_LPT)
2003 		    && (sc->sc_type != WM_T_PCH_SPT)
2004 		    && (sc->sc_type != WM_T_PCH_CNP)) {
2005 			/* ICH* and PCH* have no PCIe capability registers */
2006 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2007 				PCI_CAP_PCIEXPRESS, &sc->sc_pcixe_capoff,
2008 				NULL) == 0)
2009 				aprint_error_dev(sc->sc_dev,
2010 				    "unable to find PCIe capability\n");
2011 		}
2012 		aprint_verbose_dev(sc->sc_dev, "PCI-Express bus\n");
2013 	} else {
2014 		reg = CSR_READ(sc, WMREG_STATUS);
2015 		if (reg & STATUS_BUS64)
2016 			sc->sc_flags |= WM_F_BUS64;
2017 		if ((reg & STATUS_PCIX_MODE) != 0) {
2018 			pcireg_t pcix_cmd, pcix_sts, bytecnt, maxb;
2019 
2020 			sc->sc_flags |= WM_F_PCIX;
2021 			if (pci_get_capability(pa->pa_pc, pa->pa_tag,
2022 				PCI_CAP_PCIX, &sc->sc_pcixe_capoff, NULL) == 0)
2023 				aprint_error_dev(sc->sc_dev,
2024 				    "unable to find PCIX capability\n");
2025 			else if (sc->sc_type != WM_T_82545_3 &&
2026 				 sc->sc_type != WM_T_82546_3) {
2027 				/*
2028 				 * Work around a problem caused by the BIOS
2029 				 * setting the max memory read byte count
2030 				 * incorrectly.
2031 				 */
2032 				pcix_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag,
2033 				    sc->sc_pcixe_capoff + PCIX_CMD);
2034 				pcix_sts = pci_conf_read(pa->pa_pc, pa->pa_tag,
2035 				    sc->sc_pcixe_capoff + PCIX_STATUS);
2036 
2037 				bytecnt = (pcix_cmd & PCIX_CMD_BYTECNT_MASK) >>
2038 				    PCIX_CMD_BYTECNT_SHIFT;
2039 				maxb = (pcix_sts & PCIX_STATUS_MAXB_MASK) >>
2040 				    PCIX_STATUS_MAXB_SHIFT;
2041 				if (bytecnt > maxb) {
2042 					aprint_verbose_dev(sc->sc_dev,
2043 					    "resetting PCI-X MMRBC: %d -> %d\n",
2044 					    512 << bytecnt, 512 << maxb);
2045 					pcix_cmd = (pcix_cmd &
2046 					    ~PCIX_CMD_BYTECNT_MASK) |
2047 					    (maxb << PCIX_CMD_BYTECNT_SHIFT);
2048 					pci_conf_write(pa->pa_pc, pa->pa_tag,
2049 					    sc->sc_pcixe_capoff + PCIX_CMD,
2050 					    pcix_cmd);
2051 				}
2052 			}
2053 		}
2054 		/*
2055 		 * The quad port adapter is special; it has a PCIX-PCIX
2056 		 * bridge on the board, and can run the secondary bus at
2057 		 * a higher speed.
2058 		 */
2059 		if (wmp->wmp_product == PCI_PRODUCT_INTEL_82546EB_QUAD) {
2060 			sc->sc_bus_speed = (sc->sc_flags & WM_F_PCIX) ? 120
2061 								      : 66;
2062 		} else if (sc->sc_flags & WM_F_PCIX) {
2063 			switch (reg & STATUS_PCIXSPD_MASK) {
2064 			case STATUS_PCIXSPD_50_66:
2065 				sc->sc_bus_speed = 66;
2066 				break;
2067 			case STATUS_PCIXSPD_66_100:
2068 				sc->sc_bus_speed = 100;
2069 				break;
2070 			case STATUS_PCIXSPD_100_133:
2071 				sc->sc_bus_speed = 133;
2072 				break;
2073 			default:
2074 				aprint_error_dev(sc->sc_dev,
2075 				    "unknown PCIXSPD %d; assuming 66MHz\n",
2076 				    reg & STATUS_PCIXSPD_MASK);
2077 				sc->sc_bus_speed = 66;
2078 				break;
2079 			}
2080 		} else
2081 			sc->sc_bus_speed = (reg & STATUS_PCI66) ? 66 : 33;
2082 		aprint_verbose_dev(sc->sc_dev, "%d-bit %dMHz %s bus\n",
2083 		    (sc->sc_flags & WM_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
2084 		    (sc->sc_flags & WM_F_PCIX) ? "PCIX" : "PCI");
2085 	}
2086 
2087 	/* Disable ASPM L0s and/or L1 for workaround */
2088 	wm_disable_aspm(sc);
2089 
2090 	/* clear interesting stat counters */
2091 	CSR_READ(sc, WMREG_COLC);
2092 	CSR_READ(sc, WMREG_RXERRC);
2093 
2094 	if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)
2095 	    || (sc->sc_type >= WM_T_ICH8))
2096 		sc->sc_ich_phymtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2097 	if (sc->sc_type >= WM_T_ICH8)
2098 		sc->sc_ich_nvmmtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2099 
2100 	/* Set PHY, NVM mutex related stuff */
2101 	switch (sc->sc_type) {
2102 	case WM_T_82542_2_0:
2103 	case WM_T_82542_2_1:
2104 	case WM_T_82543:
2105 	case WM_T_82544:
2106 		/* Microwire */
2107 		sc->nvm.read = wm_nvm_read_uwire;
2108 		sc->sc_nvm_wordsize = 64;
2109 		sc->sc_nvm_addrbits = 6;
2110 		break;
2111 	case WM_T_82540:
2112 	case WM_T_82545:
2113 	case WM_T_82545_3:
2114 	case WM_T_82546:
2115 	case WM_T_82546_3:
2116 		/* Microwire */
2117 		sc->nvm.read = wm_nvm_read_uwire;
2118 		reg = CSR_READ(sc, WMREG_EECD);
2119 		if (reg & EECD_EE_SIZE) {
2120 			sc->sc_nvm_wordsize = 256;
2121 			sc->sc_nvm_addrbits = 8;
2122 		} else {
2123 			sc->sc_nvm_wordsize = 64;
2124 			sc->sc_nvm_addrbits = 6;
2125 		}
2126 		sc->sc_flags |= WM_F_LOCK_EECD;
2127 		sc->nvm.acquire = wm_get_eecd;
2128 		sc->nvm.release = wm_put_eecd;
2129 		break;
2130 	case WM_T_82541:
2131 	case WM_T_82541_2:
2132 	case WM_T_82547:
2133 	case WM_T_82547_2:
2134 		reg = CSR_READ(sc, WMREG_EECD);
2135 		/*
2136 		 * wm_nvm_set_addrbits_size_eecd() accesses the SPI only on the
2137 		 * 8254[17], so set the flags and functions before calling it.
2138 		 */
2139 		sc->sc_flags |= WM_F_LOCK_EECD;
2140 		sc->nvm.acquire = wm_get_eecd;
2141 		sc->nvm.release = wm_put_eecd;
2142 		if (reg & EECD_EE_TYPE) {
2143 			/* SPI */
2144 			sc->nvm.read = wm_nvm_read_spi;
2145 			sc->sc_flags |= WM_F_EEPROM_SPI;
2146 			wm_nvm_set_addrbits_size_eecd(sc);
2147 		} else {
2148 			/* Microwire */
2149 			sc->nvm.read = wm_nvm_read_uwire;
2150 			if ((reg & EECD_EE_ABITS) != 0) {
2151 				sc->sc_nvm_wordsize = 256;
2152 				sc->sc_nvm_addrbits = 8;
2153 			} else {
2154 				sc->sc_nvm_wordsize = 64;
2155 				sc->sc_nvm_addrbits = 6;
2156 			}
2157 		}
2158 		break;
2159 	case WM_T_82571:
2160 	case WM_T_82572:
2161 		/* SPI */
2162 		sc->nvm.read = wm_nvm_read_eerd;
2163 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2164 		sc->sc_flags |= WM_F_EEPROM_SPI;
2165 		wm_nvm_set_addrbits_size_eecd(sc);
2166 		sc->phy.acquire = wm_get_swsm_semaphore;
2167 		sc->phy.release = wm_put_swsm_semaphore;
2168 		sc->nvm.acquire = wm_get_nvm_82571;
2169 		sc->nvm.release = wm_put_nvm_82571;
2170 		break;
2171 	case WM_T_82573:
2172 	case WM_T_82574:
2173 	case WM_T_82583:
2174 		sc->nvm.read = wm_nvm_read_eerd;
2175 		/* Don't use WM_F_LOCK_EECD because we use EERD */
2176 		if (sc->sc_type == WM_T_82573) {
2177 			sc->phy.acquire = wm_get_swsm_semaphore;
2178 			sc->phy.release = wm_put_swsm_semaphore;
2179 			sc->nvm.acquire = wm_get_nvm_82571;
2180 			sc->nvm.release = wm_put_nvm_82571;
2181 		} else {
2182 			/* Both PHY and NVM use the same semaphore. */
2183 			sc->phy.acquire = sc->nvm.acquire
2184 			    = wm_get_swfwhw_semaphore;
2185 			sc->phy.release = sc->nvm.release
2186 			    = wm_put_swfwhw_semaphore;
2187 		}
2188 		if (wm_nvm_is_onboard_eeprom(sc) == 0) {
2189 			sc->sc_flags |= WM_F_EEPROM_FLASH;
2190 			sc->sc_nvm_wordsize = 2048;
2191 		} else {
2192 			/* SPI */
2193 			sc->sc_flags |= WM_F_EEPROM_SPI;
2194 			wm_nvm_set_addrbits_size_eecd(sc);
2195 		}
2196 		break;
2197 	case WM_T_82575:
2198 	case WM_T_82576:
2199 	case WM_T_82580:
2200 	case WM_T_I350:
2201 	case WM_T_I354:
2202 	case WM_T_80003:
2203 		/* SPI */
2204 		sc->sc_flags |= WM_F_EEPROM_SPI;
2205 		wm_nvm_set_addrbits_size_eecd(sc);
2206 		if ((sc->sc_type == WM_T_80003)
2207 		    || (sc->sc_nvm_wordsize < (1 << 15))) {
2208 			sc->nvm.read = wm_nvm_read_eerd;
2209 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2210 		} else {
2211 			sc->nvm.read = wm_nvm_read_spi;
2212 			sc->sc_flags |= WM_F_LOCK_EECD;
2213 		}
2214 		sc->phy.acquire = wm_get_phy_82575;
2215 		sc->phy.release = wm_put_phy_82575;
2216 		sc->nvm.acquire = wm_get_nvm_80003;
2217 		sc->nvm.release = wm_put_nvm_80003;
2218 		break;
2219 	case WM_T_ICH8:
2220 	case WM_T_ICH9:
2221 	case WM_T_ICH10:
2222 	case WM_T_PCH:
2223 	case WM_T_PCH2:
2224 	case WM_T_PCH_LPT:
2225 		sc->nvm.read = wm_nvm_read_ich8;
2226 		/* FLASH */
2227 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2228 		sc->sc_nvm_wordsize = 2048;
2229 		memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,WM_ICH8_FLASH);
2230 		if (pci_mapreg_map(pa, WM_ICH8_FLASH, memtype, 0,
2231 		    &sc->sc_flasht, &sc->sc_flashh, NULL, &sc->sc_flashs)) {
2232 			aprint_error_dev(sc->sc_dev,
2233 			    "can't map FLASH registers\n");
2234 			goto out;
2235 		}
2236 		reg = ICH8_FLASH_READ32(sc, ICH_FLASH_GFPREG);
2237 		sc->sc_ich8_flash_base = (reg & ICH_GFPREG_BASE_MASK) *
2238 		    ICH_FLASH_SECTOR_SIZE;
2239 		sc->sc_ich8_flash_bank_size =
2240 		    ((reg >> 16) & ICH_GFPREG_BASE_MASK) + 1;
2241 		sc->sc_ich8_flash_bank_size -= (reg & ICH_GFPREG_BASE_MASK);
2242 		sc->sc_ich8_flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
2243 		sc->sc_ich8_flash_bank_size /= 2 * sizeof(uint16_t);
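		/*
		 * Worked example with a hypothetical GFPREG value: base 0
		 * and limit 7 span 8 sectors of ICH_FLASH_SECTOR_SIZE bytes
		 * each; dividing by 2 * sizeof(uint16_t) leaves the size of
		 * one of the two banks, counted in 16-bit NVM words.
		 */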
2244 		sc->sc_flashreg_offset = 0;
2245 		sc->phy.acquire = wm_get_swflag_ich8lan;
2246 		sc->phy.release = wm_put_swflag_ich8lan;
2247 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2248 		sc->nvm.release = wm_put_nvm_ich8lan;
2249 		break;
2250 	case WM_T_PCH_SPT:
2251 	case WM_T_PCH_CNP:
2252 		sc->nvm.read = wm_nvm_read_spt;
2253 		/* SPT has no GFPREG; flash registers mapped through BAR0 */
2254 		sc->sc_flags |= WM_F_EEPROM_FLASH;
2255 		sc->sc_flasht = sc->sc_st;
2256 		sc->sc_flashh = sc->sc_sh;
2257 		sc->sc_ich8_flash_base = 0;
2258 		sc->sc_nvm_wordsize =
2259 		    (((CSR_READ(sc, WMREG_STRAP) >> 1) & 0x1F) + 1)
2260 		    * NVM_SIZE_MULTIPLIER;
2261 		/* This is the size in bytes; we want words */
2262 		sc->sc_nvm_wordsize /= 2;
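		/*
		 * E.g. a (hypothetical) 5-bit strap field of 7 gives
		 * 8 * NVM_SIZE_MULTIPLIER bytes, i.e. 4 * NVM_SIZE_MULTIPLIER
		 * 16-bit words, which the two banks below split evenly.
		 */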
2263 		/* assume 2 banks */
2264 		sc->sc_ich8_flash_bank_size = sc->sc_nvm_wordsize / 2;
2265 		sc->sc_flashreg_offset = WM_PCH_SPT_FLASHOFFSET;
2266 		sc->phy.acquire = wm_get_swflag_ich8lan;
2267 		sc->phy.release = wm_put_swflag_ich8lan;
2268 		sc->nvm.acquire = wm_get_nvm_ich8lan;
2269 		sc->nvm.release = wm_put_nvm_ich8lan;
2270 		break;
2271 	case WM_T_I210:
2272 	case WM_T_I211:
2273 		/* Allow one clear of the SW semaphore on I210 and newer */
2274 		sc->sc_flags |= WM_F_WA_I210_CLSEM;
2275 		if (wm_nvm_flash_presence_i210(sc)) {
2276 			sc->nvm.read = wm_nvm_read_eerd;
2277 			/* Don't use WM_F_LOCK_EECD because we use EERD */
2278 			sc->sc_flags |= WM_F_EEPROM_FLASH_HW;
2279 			wm_nvm_set_addrbits_size_eecd(sc);
2280 		} else {
2281 			sc->nvm.read = wm_nvm_read_invm;
2282 			sc->sc_flags |= WM_F_EEPROM_INVM;
2283 			sc->sc_nvm_wordsize = INVM_SIZE;
2284 		}
2285 		sc->phy.acquire = wm_get_phy_82575;
2286 		sc->phy.release = wm_put_phy_82575;
2287 		sc->nvm.acquire = wm_get_nvm_80003;
2288 		sc->nvm.release = wm_put_nvm_80003;
2289 		break;
2290 	default:
2291 		break;
2292 	}
2293 
2294 	/* Ensure the SMBI bit is clear before first NVM or PHY access */
2295 	switch (sc->sc_type) {
2296 	case WM_T_82571:
2297 	case WM_T_82572:
2298 		reg = CSR_READ(sc, WMREG_SWSM2);
2299 		if ((reg & SWSM2_LOCK) == 0) {
2300 			CSR_WRITE(sc, WMREG_SWSM2, reg | SWSM2_LOCK);
2301 			force_clear_smbi = true;
2302 		} else
2303 			force_clear_smbi = false;
2304 		break;
2305 	case WM_T_82573:
2306 	case WM_T_82574:
2307 	case WM_T_82583:
2308 		force_clear_smbi = true;
2309 		break;
2310 	default:
2311 		force_clear_smbi = false;
2312 		break;
2313 	}
2314 	if (force_clear_smbi) {
2315 		reg = CSR_READ(sc, WMREG_SWSM);
2316 		if ((reg & SWSM_SMBI) != 0)
2317 			aprint_error_dev(sc->sc_dev,
2318 			    "Please update the Bootagent\n");
2319 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_SMBI);
2320 	}
2321 
2322 	/*
2323 	 * Defer printing the EEPROM type until after verifying the checksum.
2324 	 * This allows the EEPROM type to be printed correctly in the case
2325 	 * that no EEPROM is attached.
2326 	 */
2327 	/*
2328 	 * Validate the EEPROM checksum. If the checksum fails, flag
2329 	 * this for later, so we can fail future reads from the EEPROM.
2330 	 */
2331 	if (wm_nvm_validate_checksum(sc)) {
2332 		/*
2333 		 * Check the sum again, because some PCI-e parts fail the
2334 		 * first check due to the link being in a sleep state.
2335 		 */
2336 		if (wm_nvm_validate_checksum(sc))
2337 			sc->sc_flags |= WM_F_EEPROM_INVALID;
2338 	}
2339 
2340 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
2341 		aprint_verbose_dev(sc->sc_dev, "No EEPROM");
2342 	else {
2343 		aprint_verbose_dev(sc->sc_dev, "%u words ",
2344 		    sc->sc_nvm_wordsize);
2345 		if (sc->sc_flags & WM_F_EEPROM_INVM)
2346 			aprint_verbose("iNVM");
2347 		else if (sc->sc_flags & WM_F_EEPROM_FLASH_HW)
2348 			aprint_verbose("FLASH(HW)");
2349 		else if (sc->sc_flags & WM_F_EEPROM_FLASH)
2350 			aprint_verbose("FLASH");
2351 		else {
2352 			if (sc->sc_flags & WM_F_EEPROM_SPI)
2353 				eetype = "SPI";
2354 			else
2355 				eetype = "MicroWire";
2356 			aprint_verbose("(%d address bits) %s EEPROM",
2357 			    sc->sc_nvm_addrbits, eetype);
2358 		}
2359 	}
2360 	wm_nvm_version(sc);
2361 	aprint_verbose("\n");
2362 
2363 	/*
2364 	 * XXX The first call to wm_gmii_setup_phytype. The result might be
2365 	 * incorrect.
2366 	 */
2367 	wm_gmii_setup_phytype(sc, 0, 0);
2368 
2369 	/* Reset the chip to a known state. */
2370 	wm_reset(sc);
2371 
2372 	/*
2373 	 * Check for I21[01] PLL workaround.
2374 	 *
2375 	 * Three cases:
2376 	 * a) Chip is I211.
2377 	 * b) Chip is I210 and it uses INVM (not FLASH).
2378 	 * c) Chip is I210 (and it uses FLASH) and the NVM image version < 3.25
2379 	 */
2380 	if (sc->sc_type == WM_T_I211)
2381 		sc->sc_flags |= WM_F_PLL_WA_I210;
2382 	if (sc->sc_type == WM_T_I210) {
2383 		if (!wm_nvm_flash_presence_i210(sc))
2384 			sc->sc_flags |= WM_F_PLL_WA_I210;
2385 		else if ((sc->sc_nvm_ver_major < 3)
2386 		    || ((sc->sc_nvm_ver_major == 3)
2387 			&& (sc->sc_nvm_ver_minor < 25))) {
2388 			aprint_verbose_dev(sc->sc_dev,
2389 			    "ROM image version %d.%d is older than 3.25\n",
2390 			    sc->sc_nvm_ver_major, sc->sc_nvm_ver_minor);
2391 			sc->sc_flags |= WM_F_PLL_WA_I210;
2392 		}
2393 	}
2394 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
2395 		wm_pll_workaround_i210(sc);
2396 
2397 	wm_get_wakeup(sc);
2398 
2399 	/* Non-AMT based hardware can now take control from firmware */
2400 	if ((sc->sc_flags & WM_F_HAS_AMT) == 0)
2401 		wm_get_hw_control(sc);
2402 
2403 	/*
2404 	 * Read the Ethernet address from the EEPROM, if it was not
2405 	 * already found in the device properties.
2406 	 */
2407 	ea = prop_dictionary_get(dict, "mac-address");
2408 	if (ea != NULL) {
2409 		KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
2410 		KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
2411 		memcpy(enaddr, prop_data_data_nocopy(ea), ETHER_ADDR_LEN);
2412 	} else {
2413 		if (wm_read_mac_addr(sc, enaddr) != 0) {
2414 			aprint_error_dev(sc->sc_dev,
2415 			    "unable to read Ethernet address\n");
2416 			goto out;
2417 		}
2418 	}
2419 
2420 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
2421 	    ether_sprintf(enaddr));
2422 
2423 	/*
2424 	 * Read the config info from the EEPROM, and set up various
2425 	 * bits in the control registers based on their contents.
2426 	 */
2427 	pn = prop_dictionary_get(dict, "i82543-cfg1");
2428 	if (pn != NULL) {
2429 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2430 		cfg1 = (uint16_t) prop_number_integer_value(pn);
2431 	} else {
2432 		if (wm_nvm_read(sc, NVM_OFF_CFG1, 1, &cfg1)) {
2433 			aprint_error_dev(sc->sc_dev, "unable to read CFG1\n");
2434 			goto out;
2435 		}
2436 	}
2437 
2438 	pn = prop_dictionary_get(dict, "i82543-cfg2");
2439 	if (pn != NULL) {
2440 		KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2441 		cfg2 = (uint16_t) prop_number_integer_value(pn);
2442 	} else {
2443 		if (wm_nvm_read(sc, NVM_OFF_CFG2, 1, &cfg2)) {
2444 			aprint_error_dev(sc->sc_dev, "unable to read CFG2\n");
2445 			goto out;
2446 		}
2447 	}
2448 
2449 	/* check for WM_F_WOL */
2450 	switch (sc->sc_type) {
2451 	case WM_T_82542_2_0:
2452 	case WM_T_82542_2_1:
2453 	case WM_T_82543:
2454 		/* dummy? */
2455 		eeprom_data = 0;
2456 		apme_mask = NVM_CFG3_APME;
2457 		break;
2458 	case WM_T_82544:
2459 		apme_mask = NVM_CFG2_82544_APM_EN;
2460 		eeprom_data = cfg2;
2461 		break;
2462 	case WM_T_82546:
2463 	case WM_T_82546_3:
2464 	case WM_T_82571:
2465 	case WM_T_82572:
2466 	case WM_T_82573:
2467 	case WM_T_82574:
2468 	case WM_T_82583:
2469 	case WM_T_80003:
2470 	default:
2471 		apme_mask = NVM_CFG3_APME;
2472 		wm_nvm_read(sc, (sc->sc_funcid == 1) ? NVM_OFF_CFG3_PORTB
2473 		    : NVM_OFF_CFG3_PORTA, 1, &eeprom_data);
2474 		break;
2475 	case WM_T_82575:
2476 	case WM_T_82576:
2477 	case WM_T_82580:
2478 	case WM_T_I350:
2479 	case WM_T_I354: /* XXX ok? */
2480 	case WM_T_ICH8:
2481 	case WM_T_ICH9:
2482 	case WM_T_ICH10:
2483 	case WM_T_PCH:
2484 	case WM_T_PCH2:
2485 	case WM_T_PCH_LPT:
2486 	case WM_T_PCH_SPT:
2487 	case WM_T_PCH_CNP:
2488 		/* XXX The funcid should be checked on some devices */
2489 		apme_mask = WUC_APME;
2490 		eeprom_data = CSR_READ(sc, WMREG_WUC);
2491 		break;
2492 	}
2493 
2494 	/* Check for WM_F_WOL flag after the setting of the EEPROM stuff */
2495 	if ((eeprom_data & apme_mask) != 0)
2496 		sc->sc_flags |= WM_F_WOL;
2497 
2498 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)) {
2499 		/* Check NVM for autonegotiation */
2500 		if (wm_nvm_read(sc, NVM_OFF_COMPAT, 1, &nvmword) == 0) {
2501 			if ((nvmword & NVM_COMPAT_SERDES_FORCE_MODE) != 0)
2502 				sc->sc_flags |= WM_F_PCS_DIS_AUTONEGO;
2503 		}
2504 	}
2505 
2506 	/*
2507 	 * XXX need special handling for some multiple-port cards
2508 	 * to disable a particular port.
2509 	 */
2510 
2511 	if (sc->sc_type >= WM_T_82544) {
2512 		pn = prop_dictionary_get(dict, "i82543-swdpin");
2513 		if (pn != NULL) {
2514 			KASSERT(prop_object_type(pn) == PROP_TYPE_NUMBER);
2515 			swdpin = (uint16_t) prop_number_integer_value(pn);
2516 		} else {
2517 			if (wm_nvm_read(sc, NVM_OFF_SWDPIN, 1, &swdpin)) {
2518 				aprint_error_dev(sc->sc_dev,
2519 				    "unable to read SWDPIN\n");
2520 				goto out;
2521 			}
2522 		}
2523 	}
2524 
2525 	if (cfg1 & NVM_CFG1_ILOS)
2526 		sc->sc_ctrl |= CTRL_ILOS;
2527 
2528 	/*
2529 	 * XXX
2530 	 * This code isn't correct because pins 2 and 3 are located
2531 	 * in different positions on newer chips. Check all the datasheets.
2532 	 *
2533 	 * Until this problem is resolved, apply this only to chips <= 82580.
2534 	 */
2535 	if (sc->sc_type <= WM_T_82580) {
2536 		if (sc->sc_type >= WM_T_82544) {
2537 			sc->sc_ctrl |=
2538 			    ((swdpin >> NVM_SWDPIN_SWDPIO_SHIFT) & 0xf) <<
2539 			    CTRL_SWDPIO_SHIFT;
2540 			sc->sc_ctrl |=
2541 			    ((swdpin >> NVM_SWDPIN_SWDPIN_SHIFT) & 0xf) <<
2542 			    CTRL_SWDPINS_SHIFT;
2543 		} else {
2544 			sc->sc_ctrl |=
2545 			    ((cfg1 >> NVM_CFG1_SWDPIO_SHIFT) & 0xf) <<
2546 			    CTRL_SWDPIO_SHIFT;
2547 		}
2548 	}
2549 
2550 	/* XXX For other than 82580? */
2551 	if (sc->sc_type == WM_T_82580) {
2552 		wm_nvm_read(sc, NVM_OFF_CFG3_PORTA, 1, &nvmword);
2553 		if (nvmword & __BIT(13))
2554 			sc->sc_ctrl |= CTRL_ILOS;
2555 	}
2556 
2557 #if 0
2558 	if (sc->sc_type >= WM_T_82544) {
2559 		if (cfg1 & NVM_CFG1_IPS0)
2560 			sc->sc_ctrl_ext |= CTRL_EXT_IPS;
2561 		if (cfg1 & NVM_CFG1_IPS1)
2562 			sc->sc_ctrl_ext |= CTRL_EXT_IPS1;
2563 		sc->sc_ctrl_ext |=
2564 		    ((swdpin >> (NVM_SWDPIN_SWDPIO_SHIFT + 4)) & 0xd) <<
2565 		    CTRL_EXT_SWDPIO_SHIFT;
2566 		sc->sc_ctrl_ext |=
2567 		    ((swdpin >> (NVM_SWDPIN_SWDPIN_SHIFT + 4)) & 0xd) <<
2568 		    CTRL_EXT_SWDPINS_SHIFT;
2569 	} else {
2570 		sc->sc_ctrl_ext |=
2571 		    ((cfg2 >> NVM_CFG2_SWDPIO_SHIFT) & 0xf) <<
2572 		    CTRL_EXT_SWDPIO_SHIFT;
2573 	}
2574 #endif
2575 
2576 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
2577 #if 0
2578 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
2579 #endif
2580 
2581 	if (sc->sc_type == WM_T_PCH) {
2582 		uint16_t val;
2583 
2584 		/* Save the NVM K1 bit setting */
2585 		wm_nvm_read(sc, NVM_OFF_K1_CONFIG, 1, &val);
2586 
2587 		if ((val & NVM_K1_CONFIG_ENABLE) != 0)
2588 			sc->sc_nvm_k1_enabled = 1;
2589 		else
2590 			sc->sc_nvm_k1_enabled = 0;
2591 	}
2592 
2593 	/* Determine if we're GMII, TBI, SERDES or SGMII mode */
2594 	if (sc->sc_type == WM_T_ICH8 || sc->sc_type == WM_T_ICH9
2595 	    || sc->sc_type == WM_T_ICH10 || sc->sc_type == WM_T_PCH
2596 	    || sc->sc_type == WM_T_PCH2 || sc->sc_type == WM_T_PCH_LPT
2597 	    || sc->sc_type == WM_T_PCH_SPT || sc->sc_type == WM_T_PCH_CNP
2598 	    || sc->sc_type == WM_T_82573
2599 	    || sc->sc_type == WM_T_82574 || sc->sc_type == WM_T_82583) {
2600 		/* Copper only */
2601 	} else if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
2602 	    || (sc->sc_type == WM_T_82580) || (sc->sc_type == WM_T_I350)
2603 	    || (sc->sc_type == WM_T_I354) || (sc->sc_type == WM_T_I210)
2604 	    || (sc->sc_type == WM_T_I211)) {
2605 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
2606 		link_mode = reg & CTRL_EXT_LINK_MODE_MASK;
2607 		switch (link_mode) {
2608 		case CTRL_EXT_LINK_MODE_1000KX:
2609 			aprint_verbose_dev(sc->sc_dev, "1000KX\n");
2610 			sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2611 			break;
2612 		case CTRL_EXT_LINK_MODE_SGMII:
2613 			if (wm_sgmii_uses_mdio(sc)) {
2614 				aprint_verbose_dev(sc->sc_dev,
2615 				    "SGMII(MDIO)\n");
2616 				sc->sc_flags |= WM_F_SGMII;
2617 				sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2618 				break;
2619 			}
2620 			aprint_verbose_dev(sc->sc_dev, "SGMII(I2C)\n");
2621 			/*FALLTHROUGH*/
2622 		case CTRL_EXT_LINK_MODE_PCIE_SERDES:
2623 			sc->sc_mediatype = wm_sfp_get_media_type(sc);
2624 			if (sc->sc_mediatype == WM_MEDIATYPE_UNKNOWN) {
2625 				if (link_mode
2626 				    == CTRL_EXT_LINK_MODE_SGMII) {
2627 					sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2628 					sc->sc_flags |= WM_F_SGMII;
2629 				} else {
2630 					sc->sc_mediatype = WM_MEDIATYPE_SERDES;
2631 					aprint_verbose_dev(sc->sc_dev,
2632 					    "SERDES\n");
2633 				}
2634 				break;
2635 			}
2636 			if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
2637 				aprint_verbose_dev(sc->sc_dev, "SERDES\n");
2638 
2639 			/* Change current link mode setting */
2640 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
2641 			switch (sc->sc_mediatype) {
2642 			case WM_MEDIATYPE_COPPER:
2643 				reg |= CTRL_EXT_LINK_MODE_SGMII;
2644 				break;
2645 			case WM_MEDIATYPE_SERDES:
2646 				reg |= CTRL_EXT_LINK_MODE_PCIE_SERDES;
2647 				break;
2648 			default:
2649 				break;
2650 			}
2651 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2652 			break;
2653 		case CTRL_EXT_LINK_MODE_GMII:
2654 		default:
2655 			aprint_verbose_dev(sc->sc_dev, "Copper\n");
2656 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2657 			break;
2658 		}
2659 
2660 		reg &= ~CTRL_EXT_I2C_ENA;
2661 		if ((sc->sc_flags & WM_F_SGMII) != 0)
2662 			reg |= CTRL_EXT_I2C_ENA;
2663 		else
2664 			reg &= ~CTRL_EXT_I2C_ENA;
2665 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
2666 	} else if (sc->sc_type < WM_T_82543 ||
2667 	    (CSR_READ(sc, WMREG_STATUS) & STATUS_TBIMODE) != 0) {
2668 		if (sc->sc_mediatype == WM_MEDIATYPE_COPPER) {
2669 			aprint_error_dev(sc->sc_dev,
2670 			    "WARNING: TBIMODE set on 1000BASE-T product!\n");
2671 			sc->sc_mediatype = WM_MEDIATYPE_FIBER;
2672 		}
2673 	} else {
2674 		if (sc->sc_mediatype == WM_MEDIATYPE_FIBER) {
2675 			aprint_error_dev(sc->sc_dev,
2676 			    "WARNING: TBIMODE clear on 1000BASE-X product!\n");
2677 			sc->sc_mediatype = WM_MEDIATYPE_COPPER;
2678 		}
2679 	}
2680 	snprintb(buf, sizeof(buf), WM_FLAGS, sc->sc_flags);
2681 	aprint_verbose_dev(sc->sc_dev, "%s\n", buf);
2682 
2683 	/* Set device properties (macflags) */
2684 	prop_dictionary_set_uint32(dict, "macflags", sc->sc_flags);
2685 
2686 	/* Initialize the media structures accordingly. */
2687 	if (sc->sc_mediatype == WM_MEDIATYPE_COPPER)
2688 		wm_gmii_mediainit(sc, wmp->wmp_product);
2689 	else
2690 		wm_tbi_mediainit(sc); /* All others */
2691 
2692 	ifp = &sc->sc_ethercom.ec_if;
2693 	xname = device_xname(sc->sc_dev);
2694 	strlcpy(ifp->if_xname, xname, IFNAMSIZ);
2695 	ifp->if_softc = sc;
2696 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
2697 #ifdef WM_MPSAFE
2698 	ifp->if_extflags = IFEF_MPSAFE;
2699 #endif
2700 	ifp->if_ioctl = wm_ioctl;
2701 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
2702 		ifp->if_start = wm_nq_start;
2703 		/*
2704 		 * When the number of CPUs is one and the controller can use
2705 		 * MSI-X, wm(4) uses MSI-X but *does not* use multiqueue.
2706 		 * That is, wm(4) uses two interrupts: one for Tx/Rx and
2707 		 * the other for link status changes.
2708 		 * In this situation, wm_nq_transmit() is disadvantageous
2709 		 * because of wm_select_txqueue() and pcq(9) overhead.
2710 		 */
2711 		if (wm_is_using_multiqueue(sc))
2712 			ifp->if_transmit = wm_nq_transmit;
2713 	} else {
2714 		ifp->if_start = wm_start;
2715 		/*
2716 		 * wm_transmit() has the same disadvantage as wm_nq_transmit().
2717 		 */
2718 		if (wm_is_using_multiqueue(sc))
2719 			ifp->if_transmit = wm_transmit;
2720 	}
2721 	/* wm(4) doesn't use ifp->if_watchdog; wm_tick is the watchdog. */
2722 	ifp->if_init = wm_init;
2723 	ifp->if_stop = wm_stop;
2724 	IFQ_SET_MAXLEN(&ifp->if_snd, max(WM_IFQUEUELEN, IFQ_MAXLEN));
2725 	IFQ_SET_READY(&ifp->if_snd);
2726 
2727 	/* Check for jumbo frame */
2728 	switch (sc->sc_type) {
2729 	case WM_T_82573:
2730 		/* XXX limited to 9234 if ASPM is disabled */
2731 		wm_nvm_read(sc, NVM_OFF_INIT_3GIO_3, 1, &nvmword);
2732 		if ((nvmword & NVM_3GIO_3_ASPM_MASK) != 0)
2733 			sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2734 		break;
2735 	case WM_T_82571:
2736 	case WM_T_82572:
2737 	case WM_T_82574:
2738 	case WM_T_82583:
2739 	case WM_T_82575:
2740 	case WM_T_82576:
2741 	case WM_T_82580:
2742 	case WM_T_I350:
2743 	case WM_T_I354:
2744 	case WM_T_I210:
2745 	case WM_T_I211:
2746 	case WM_T_80003:
2747 	case WM_T_ICH9:
2748 	case WM_T_ICH10:
2749 	case WM_T_PCH2:	/* PCH2 supports 9K frame size */
2750 	case WM_T_PCH_LPT:
2751 	case WM_T_PCH_SPT:
2752 	case WM_T_PCH_CNP:
2753 		/* XXX limited to 9234 */
2754 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2755 		break;
2756 	case WM_T_PCH:
2757 		/* XXX limited to 4096 */
2758 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2759 		break;
2760 	case WM_T_82542_2_0:
2761 	case WM_T_82542_2_1:
2762 	case WM_T_ICH8:
2763 		/* No support for jumbo frame */
2764 		break;
2765 	default:
2766 		/* ETHER_MAX_LEN_JUMBO */
2767 		sc->sc_ethercom.ec_capabilities |= ETHERCAP_JUMBO_MTU;
2768 		break;
2769 	}
2770 
2771 	/* If we're an i82543 or greater, we can support VLANs. */
2772 	if (sc->sc_type >= WM_T_82543)
2773 		sc->sc_ethercom.ec_capabilities |=
2774 		    ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING;
2775 
2776 	/*
2777 	 * We can perform TCPv4 and UDPv4 checksums in-bound.  Only
2778 	 * on i82543 and later.
2779 	 */
2780 	if (sc->sc_type >= WM_T_82543) {
2781 		ifp->if_capabilities |=
2782 		    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
2783 		    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
2784 		    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
2785 		    IFCAP_CSUM_TCPv6_Tx |
2786 		    IFCAP_CSUM_UDPv6_Tx;
2787 	}
2788 
2789 	/*
2790 	 * XXXyamt: i'm not sure which chips support RXCSUM_IPV6OFL.
2791 	 *
2792 	 *	82541GI (8086:1076) ... no
2793 	 *	82572EI (8086:10b9) ... yes
2794 	 */
2795 	if (sc->sc_type >= WM_T_82571) {
2796 		ifp->if_capabilities |=
2797 		    IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx;
2798 	}
2799 
2800 	/*
2801 	 * If we're an i82544 or greater (except i82547), we can do
2802 	 * TCP segmentation offload.
2803 	 */
2804 	if (sc->sc_type >= WM_T_82544 && sc->sc_type != WM_T_82547) {
2805 		ifp->if_capabilities |= IFCAP_TSOv4;
2806 	}
2807 
2808 	if (sc->sc_type >= WM_T_82571) {
2809 		ifp->if_capabilities |= IFCAP_TSOv6;
2810 	}
2811 
2812 	sc->sc_tx_process_limit = WM_TX_PROCESS_LIMIT_DEFAULT;
2813 	sc->sc_tx_intr_process_limit = WM_TX_INTR_PROCESS_LIMIT_DEFAULT;
2814 	sc->sc_rx_process_limit = WM_RX_PROCESS_LIMIT_DEFAULT;
2815 	sc->sc_rx_intr_process_limit = WM_RX_INTR_PROCESS_LIMIT_DEFAULT;
2816 
2817 #ifdef WM_MPSAFE
2818 	sc->sc_core_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
2819 #else
2820 	sc->sc_core_lock = NULL;
2821 #endif
2822 
2823 	/* Attach the interface. */
2824 	error = if_initialize(ifp);
2825 	if (error != 0) {
2826 		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
2827 		    error);
2828 		return; /* Error */
2829 	}
2830 	sc->sc_ipq = if_percpuq_create(&sc->sc_ethercom.ec_if);
2831 	ether_ifattach(ifp, enaddr);
2832 	ether_set_ifflags_cb(&sc->sc_ethercom, wm_ifflags_cb);
2833 	if_register(ifp);
2834 	rnd_attach_source(&sc->rnd_source, xname, RND_TYPE_NET,
2835 	    RND_FLAG_DEFAULT);
2836 
2837 #ifdef WM_EVENT_COUNTERS
2838 	/* Attach event counters. */
2839 	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
2840 	    NULL, xname, "linkintr");
2841 
2842 	evcnt_attach_dynamic(&sc->sc_ev_tx_xoff, EVCNT_TYPE_MISC,
2843 	    NULL, xname, "tx_xoff");
2844 	evcnt_attach_dynamic(&sc->sc_ev_tx_xon, EVCNT_TYPE_MISC,
2845 	    NULL, xname, "tx_xon");
2846 	evcnt_attach_dynamic(&sc->sc_ev_rx_xoff, EVCNT_TYPE_MISC,
2847 	    NULL, xname, "rx_xoff");
2848 	evcnt_attach_dynamic(&sc->sc_ev_rx_xon, EVCNT_TYPE_MISC,
2849 	    NULL, xname, "rx_xon");
2850 	evcnt_attach_dynamic(&sc->sc_ev_rx_macctl, EVCNT_TYPE_MISC,
2851 	    NULL, xname, "rx_macctl");
2852 #endif /* WM_EVENT_COUNTERS */
2853 
2854 	if (pmf_device_register(self, wm_suspend, wm_resume))
2855 		pmf_class_network_register(self, ifp);
2856 	else
2857 		aprint_error_dev(self, "couldn't establish power handler\n");
2858 
2859 	sc->sc_flags |= WM_F_ATTACHED;
2860  out:
2861 	return;
2862 }
2863 
2864 /* The detach function (ca_detach) */
2865 static int
2866 wm_detach(device_t self, int flags __unused)
2867 {
2868 	struct wm_softc *sc = device_private(self);
2869 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
2870 	int i;
2871 
2872 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
2873 		return 0;
2874 
2875 	/* Stop the interface. The callouts are stopped by wm_stop(). */
2876 	wm_stop(ifp, 1);
2877 
2878 	pmf_device_deregister(self);
2879 
2880 #ifdef WM_EVENT_COUNTERS
2881 	evcnt_detach(&sc->sc_ev_linkintr);
2882 
2883 	evcnt_detach(&sc->sc_ev_tx_xoff);
2884 	evcnt_detach(&sc->sc_ev_tx_xon);
2885 	evcnt_detach(&sc->sc_ev_rx_xoff);
2886 	evcnt_detach(&sc->sc_ev_rx_xon);
2887 	evcnt_detach(&sc->sc_ev_rx_macctl);
2888 #endif /* WM_EVENT_COUNTERS */
2889 
2890 	/* Tell the firmware about the release */
2891 	WM_CORE_LOCK(sc);
2892 	wm_release_manageability(sc);
2893 	wm_release_hw_control(sc);
2894 	wm_enable_wakeup(sc);
2895 	WM_CORE_UNLOCK(sc);
2896 
2897 	mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
2898 
2899 	/* Delete all remaining media. */
2900 	ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);
2901 
2902 	ether_ifdetach(ifp);
2903 	if_detach(ifp);
2904 	if_percpuq_destroy(sc->sc_ipq);
2905 
2906 	/* Unload RX dmamaps and free mbufs */
2907 	for (i = 0; i < sc->sc_nqueues; i++) {
2908 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
2909 		mutex_enter(rxq->rxq_lock);
2910 		wm_rxdrain(rxq);
2911 		mutex_exit(rxq->rxq_lock);
2912 	}
2913 	/* Must unlock here */
2914 
2915 	/* Disestablish the interrupt handler */
2916 	for (i = 0; i < sc->sc_nintrs; i++) {
2917 		if (sc->sc_ihs[i] != NULL) {
2918 			pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[i]);
2919 			sc->sc_ihs[i] = NULL;
2920 		}
2921 	}
2922 	pci_intr_release(sc->sc_pc, sc->sc_intrs, sc->sc_nintrs);
2923 
2924 	wm_free_txrx_queues(sc);
2925 
2926 	/* Unmap the registers */
2927 	if (sc->sc_ss) {
2928 		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_ss);
2929 		sc->sc_ss = 0;
2930 	}
2931 	if (sc->sc_ios) {
2932 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
2933 		sc->sc_ios = 0;
2934 	}
2935 	if (sc->sc_flashs) {
2936 		bus_space_unmap(sc->sc_flasht, sc->sc_flashh, sc->sc_flashs);
2937 		sc->sc_flashs = 0;
2938 	}
2939 
2940 	if (sc->sc_core_lock)
2941 		mutex_obj_free(sc->sc_core_lock);
2942 	if (sc->sc_ich_phymtx)
2943 		mutex_obj_free(sc->sc_ich_phymtx);
2944 	if (sc->sc_ich_nvmmtx)
2945 		mutex_obj_free(sc->sc_ich_nvmmtx);
2946 
2947 	return 0;
2948 }
2949 
2950 static bool
2951 wm_suspend(device_t self, const pmf_qual_t *qual)
2952 {
2953 	struct wm_softc *sc = device_private(self);
2954 
2955 	wm_release_manageability(sc);
2956 	wm_release_hw_control(sc);
2957 	wm_enable_wakeup(sc);
2958 
2959 	return true;
2960 }
2961 
2962 static bool
2963 wm_resume(device_t self, const pmf_qual_t *qual)
2964 {
2965 	struct wm_softc *sc = device_private(self);
2966 
2967 	/* Disable ASPM L0s and/or L1 for workaround */
2968 	wm_disable_aspm(sc);
2969 	wm_init_manageability(sc);
2970 
2971 	return true;
2972 }
2973 
2974 /*
2975  * wm_watchdog:		[ifnet interface function]
2976  *
2977  *	Watchdog timer handler.
2978  */
2979 static void
2980 wm_watchdog(struct ifnet *ifp)
2981 {
2982 	int qid;
2983 	struct wm_softc *sc = ifp->if_softc;
2984 	uint16_t hang_queue = 0; /* Max queue number of wm(4) is 82576's 16. */
2985 
2986 	for (qid = 0; qid < sc->sc_nqueues; qid++) {
2987 		struct wm_txqueue *txq = &sc->sc_queue[qid].wmq_txq;
2988 
2989 		wm_watchdog_txq(ifp, txq, &hang_queue);
2990 	}
2991 
2992 	/*
2993 	 * If any of the queues hung up, reset the interface.
2994 	 */
2995 	if (hang_queue != 0) {
2996 		(void) wm_init(ifp);
2997 
2998 		/*
2999 		 * There is still some upper-layer processing that calls
3000 		 * ifp->if_start(), e.g. ALTQ or a single-CPU system.
3001 		 */
3002 		/* Try to get more packets going. */
3003 		ifp->if_start(ifp);
3004 	}
3005 }
3006 
3007 
3008 static void
3009 wm_watchdog_txq(struct ifnet *ifp, struct wm_txqueue *txq, uint16_t *hang)
3010 {
3011 
3012 	mutex_enter(txq->txq_lock);
3013 	if (txq->txq_sending &&
3014 	    time_uptime - txq->txq_lastsent > wm_watchdog_timeout) {
3015 		wm_watchdog_txq_locked(ifp, txq, hang);
3016 	}
3017 	mutex_exit(txq->txq_lock);
3018 }
3019 
3020 static void
3021 wm_watchdog_txq_locked(struct ifnet *ifp, struct wm_txqueue *txq,
3022     uint16_t *hang)
3023 {
3024 	struct wm_softc *sc = ifp->if_softc;
3025 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
3026 
3027 	KASSERT(mutex_owned(txq->txq_lock));
3028 
3029 	/*
3030 	 * Since we're using delayed interrupts, sweep up
3031 	 * before we report an error.
3032 	 */
3033 	wm_txeof(txq, UINT_MAX);
3034 
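	/*
	 * If the sweep didn't clear the queue, flag it in the caller's
	 * per-queue hang bitmask.
	 */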
3035 	if (txq->txq_sending)
3036 		*hang |= __BIT(wmq->wmq_id);
3037 
3038 	if (txq->txq_free == WM_NTXDESC(txq)) {
3039 		log(LOG_ERR, "%s: device timeout (lost interrupt)\n",
3040 		    device_xname(sc->sc_dev));
3041 	} else {
3042 #ifdef WM_DEBUG
3043 		int i, j;
3044 		struct wm_txsoft *txs;
3045 #endif
3046 		log(LOG_ERR,
3047 		    "%s: device timeout (txfree %d txsfree %d txnext %d)\n",
3048 		    device_xname(sc->sc_dev), txq->txq_free, txq->txq_sfree,
3049 		    txq->txq_next);
3050 		ifp->if_oerrors++;
3051 #ifdef WM_DEBUG
3052 		for (i = txq->txq_sdirty; i != txq->txq_snext;
3053 		    i = WM_NEXTTXS(txq, i)) {
3054 		    txs = &txq->txq_soft[i];
3055 		    printf("txs %d tx %d -> %d\n",
3056 			i, txs->txs_firstdesc, txs->txs_lastdesc);
3057 		    for (j = txs->txs_firstdesc; ; j = WM_NEXTTX(txq, j)) {
3058 			    if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
3059 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3060 					txq->txq_nq_descs[j].nqtx_data.nqtxd_addr);
3061 				    printf("\t %#08x%08x\n",
3062 					txq->txq_nq_descs[j].nqtx_data.nqtxd_fields,
3063 					txq->txq_nq_descs[j].nqtx_data.nqtxd_cmdlen);
3064 			    } else {
3065 				    printf("\tdesc %d: 0x%" PRIx64 "\n", j,
3066 					(uint64_t)txq->txq_descs[j].wtx_addr.wa_high << 32 |
3067 					txq->txq_descs[j].wtx_addr.wa_low);
3068 				    printf("\t %#04x%02x%02x%08x\n",
3069 					txq->txq_descs[j].wtx_fields.wtxu_vlan,
3070 					txq->txq_descs[j].wtx_fields.wtxu_options,
3071 					txq->txq_descs[j].wtx_fields.wtxu_status,
3072 					txq->txq_descs[j].wtx_cmdlen);
3073 			    }
3074 			if (j == txs->txs_lastdesc)
3075 				break;
3076 			}
3077 		}
3078 #endif
3079 	}
3080 }
3081 
3082 /*
3083  * wm_tick:
3084  *
3085  *	One second timer, used to check link status, sweep up
3086  *	completed transmit jobs, etc.
3087  */
3088 static void
3089 wm_tick(void *arg)
3090 {
3091 	struct wm_softc *sc = arg;
3092 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3093 #ifndef WM_MPSAFE
3094 	int s = splnet();
3095 #endif
3096 
3097 	WM_CORE_LOCK(sc);
3098 
3099 	if (sc->sc_core_stopping) {
3100 		WM_CORE_UNLOCK(sc);
3101 #ifndef WM_MPSAFE
3102 		splx(s);
3103 #endif
3104 		return;
3105 	}
3106 
3107 	if (sc->sc_type >= WM_T_82542_2_1) {
3108 		WM_EVCNT_ADD(&sc->sc_ev_rx_xon, CSR_READ(sc, WMREG_XONRXC));
3109 		WM_EVCNT_ADD(&sc->sc_ev_tx_xon, CSR_READ(sc, WMREG_XONTXC));
3110 		WM_EVCNT_ADD(&sc->sc_ev_rx_xoff, CSR_READ(sc, WMREG_XOFFRXC));
3111 		WM_EVCNT_ADD(&sc->sc_ev_tx_xoff, CSR_READ(sc, WMREG_XOFFTXC));
3112 		WM_EVCNT_ADD(&sc->sc_ev_rx_macctl, CSR_READ(sc, WMREG_FCRUC));
3113 	}
3114 
3115 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
3116 	ifp->if_ierrors += 0ULL /* ensure quad_t */
3117 	    + CSR_READ(sc, WMREG_CRCERRS)
3118 	    + CSR_READ(sc, WMREG_ALGNERRC)
3119 	    + CSR_READ(sc, WMREG_SYMERRC)
3120 	    + CSR_READ(sc, WMREG_RXERRC)
3121 	    + CSR_READ(sc, WMREG_SEC)
3122 	    + CSR_READ(sc, WMREG_CEXTERR)
3123 	    + CSR_READ(sc, WMREG_RLEC);
3124 	/*
3125 	 * WMREG_RNBC is incremented when no receive buffers are available
3126 	 * in host memory. It is not a count of dropped packets, because the
3127 	 * Ethernet controller can still receive packets in that case as
3128 	 * long as there is space in the PHY's FIFO.
3129 	 *
3130 	 * To count WMREG_RNBC events, use a dedicated EVCNT rather than
3131 	 * if_iqdrops.
3132 	 */
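	/*
	 * A minimal sketch of that approach (illustrative only; the
	 * sc_ev_rnbc counter is hypothetical and not part of this driver):
	 *
	 *	evcnt_attach_dynamic(&sc->sc_ev_rnbc, EVCNT_TYPE_MISC, NULL,
	 *	    device_xname(sc->sc_dev), "rnbc");
	 *	...
	 *	WM_EVCNT_ADD(&sc->sc_ev_rnbc, CSR_READ(sc, WMREG_RNBC));
	 */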
3133 	ifp->if_iqdrops += CSR_READ(sc, WMREG_MPC);
3134 
3135 	if (sc->sc_flags & WM_F_HAS_MII)
3136 		mii_tick(&sc->sc_mii);
3137 	else if ((sc->sc_type >= WM_T_82575)
3138 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
3139 		wm_serdes_tick(sc);
3140 	else
3141 		wm_tbi_tick(sc);
3142 
3143 	WM_CORE_UNLOCK(sc);
3144 
3145 	wm_watchdog(ifp);
3146 
3147 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
3148 }
3149 
3150 static int
3151 wm_ifflags_cb(struct ethercom *ec)
3152 {
3153 	struct ifnet *ifp = &ec->ec_if;
3154 	struct wm_softc *sc = ifp->if_softc;
3155 	int rc = 0;
3156 
3157 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3158 		device_xname(sc->sc_dev), __func__));
3159 
3160 	WM_CORE_LOCK(sc);
3161 
3162 	int change = ifp->if_flags ^ sc->sc_if_flags;
3163 	sc->sc_if_flags = ifp->if_flags;
3164 
3165 	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
3166 		rc = ENETRESET;
3167 		goto out;
3168 	}
3169 
3170 	if ((change & (IFF_PROMISC | IFF_ALLMULTI)) != 0)
3171 		wm_set_filter(sc);
3172 
3173 	wm_set_vlan(sc);
3174 
3175 out:
3176 	WM_CORE_UNLOCK(sc);
3177 
3178 	return rc;
3179 }
3180 
3181 /*
3182  * wm_ioctl:		[ifnet interface function]
3183  *
3184  *	Handle control requests from the operator.
3185  */
3186 static int
3187 wm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3188 {
3189 	struct wm_softc *sc = ifp->if_softc;
3190 	struct ifreq *ifr = (struct ifreq *) data;
3191 	struct ifaddr *ifa = (struct ifaddr *)data;
3192 	struct sockaddr_dl *sdl;
3193 	int s, error;
3194 
3195 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3196 		device_xname(sc->sc_dev), __func__));
3197 
3198 #ifndef WM_MPSAFE
3199 	s = splnet();
3200 #endif
3201 	switch (cmd) {
3202 	case SIOCSIFMEDIA:
3203 	case SIOCGIFMEDIA:
3204 		WM_CORE_LOCK(sc);
3205 		/* Flow control requires full-duplex mode. */
3206 		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
3207 		    (ifr->ifr_media & IFM_FDX) == 0)
3208 			ifr->ifr_media &= ~IFM_ETH_FMASK;
3209 		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
3210 			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
3211 				/* We can do both TXPAUSE and RXPAUSE. */
3212 				ifr->ifr_media |=
3213 				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
3214 			}
3215 			sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK;
3216 		}
3217 		WM_CORE_UNLOCK(sc);
3218 #ifdef WM_MPSAFE
3219 		s = splnet();
3220 #endif
3221 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
3222 #ifdef WM_MPSAFE
3223 		splx(s);
3224 #endif
3225 		break;
3226 	case SIOCINITIFADDR:
3227 		WM_CORE_LOCK(sc);
3228 		if (ifa->ifa_addr->sa_family == AF_LINK) {
3229 			sdl = satosdl(ifp->if_dl->ifa_addr);
3230 			(void)sockaddr_dl_setaddr(sdl, sdl->sdl_len,
3231 			    LLADDR(satosdl(ifa->ifa_addr)), ifp->if_addrlen);
3232 			/* unicast address is first multicast entry */
3233 			wm_set_filter(sc);
3234 			error = 0;
3235 			WM_CORE_UNLOCK(sc);
3236 			break;
3237 		}
3238 		WM_CORE_UNLOCK(sc);
3239 		/*FALLTHROUGH*/
3240 	default:
3241 #ifdef WM_MPSAFE
3242 		s = splnet();
3243 #endif
3244 		/* It may call wm_start, so unlock here */
3245 		error = ether_ioctl(ifp, cmd, data);
3246 #ifdef WM_MPSAFE
3247 		splx(s);
3248 #endif
3249 		if (error != ENETRESET)
3250 			break;
3251 
3252 		error = 0;
3253 
3254 		if (cmd == SIOCSIFCAP) {
3255 			error = (*ifp->if_init)(ifp);
3256 		} else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
3257 			;
3258 		else if (ifp->if_flags & IFF_RUNNING) {
3259 			/*
3260 			 * Multicast list has changed; set the hardware filter
3261 			 * accordingly.
3262 			 */
3263 			WM_CORE_LOCK(sc);
3264 			wm_set_filter(sc);
3265 			WM_CORE_UNLOCK(sc);
3266 		}
3267 		break;
3268 	}
3269 
3270 #ifndef WM_MPSAFE
3271 	splx(s);
3272 #endif
3273 	return error;
3274 }
3275 
3276 /* MAC address related */
3277 
3278 /*
3279  * Get the offset of the MAC address and return it.
3280  * If an error occurs, use offset 0.
3281  */
3282 static uint16_t
3283 wm_check_alt_mac_addr(struct wm_softc *sc)
3284 {
3285 	uint16_t myea[ETHER_ADDR_LEN / 2];
3286 	uint16_t offset = NVM_OFF_MACADDR;
3287 
3288 	/* Try to read alternative MAC address pointer */
3289 	if (wm_nvm_read(sc, NVM_OFF_ALT_MAC_ADDR_PTR, 1, &offset) != 0)
3290 		return 0;
3291 
3292 	/* Check whether the pointer is valid. */
3293 	if ((offset == 0x0000) || (offset == 0xffff))
3294 		return 0;
3295 
3296 	offset += NVM_OFF_MACADDR_82571(sc->sc_funcid);
3297 	/*
3298 	 * Check whether the alternative MAC address is valid.
3299 	 * Some cards have a pointer other than 0xffff but do not
3300 	 * actually use an alternative MAC address.
3301 	 *
3302 	 * A valid address must have the multicast (I/G) bit clear.
3303 	 */
3304 	if (wm_nvm_read(sc, offset, 1, myea) == 0)
3305 		if (((myea[0] & 0xff) & 0x01) == 0)
3306 			return offset; /* Found */
3307 
3308 	/* Not found */
3309 	return 0;
3310 }
3311 
3312 static int
3313 wm_read_mac_addr(struct wm_softc *sc, uint8_t *enaddr)
3314 {
3315 	uint16_t myea[ETHER_ADDR_LEN / 2];
3316 	uint16_t offset = NVM_OFF_MACADDR;
3317 	int do_invert = 0;
3318 
3319 	switch (sc->sc_type) {
3320 	case WM_T_82580:
3321 	case WM_T_I350:
3322 	case WM_T_I354:
3323 		/* EEPROM Top Level Partitioning */
3324 		offset = NVM_OFF_LAN_FUNC_82580(sc->sc_funcid) + 0;
3325 		break;
3326 	case WM_T_82571:
3327 	case WM_T_82575:
3328 	case WM_T_82576:
3329 	case WM_T_80003:
3330 	case WM_T_I210:
3331 	case WM_T_I211:
3332 		offset = wm_check_alt_mac_addr(sc);
3333 		if (offset == 0)
3334 			if ((sc->sc_funcid & 0x01) == 1)
3335 				do_invert = 1;
3336 		break;
3337 	default:
3338 		if ((sc->sc_funcid & 0x01) == 1)
3339 			do_invert = 1;
3340 		break;
3341 	}
3342 
3343 	if (wm_nvm_read(sc, offset, sizeof(myea) / sizeof(myea[0]), myea) != 0)
3344 		goto bad;
3345 
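	/*
	 * NVM words hold the address in little-endian byte order, so unpack
	 * the low byte of each word first.  Illustrative example (made-up
	 * values): myea[] = { 0x1100, 0x3322, 0x5544 } -> 00:11:22:33:44:55.
	 */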
3346 	enaddr[0] = myea[0] & 0xff;
3347 	enaddr[1] = myea[0] >> 8;
3348 	enaddr[2] = myea[1] & 0xff;
3349 	enaddr[3] = myea[1] >> 8;
3350 	enaddr[4] = myea[2] & 0xff;
3351 	enaddr[5] = myea[2] >> 8;
3352 
3353 	/*
3354 	 * Toggle the LSB of the MAC address on the second port
3355 	 * of some dual port cards.
3356 	 */
3357 	if (do_invert != 0)
3358 		enaddr[5] ^= 1;
3359 
3360 	return 0;
3361 
3362  bad:
3363 	return -1;
3364 }
3365 
3366 /*
3367  * wm_set_ral:
3368  *
3369  *	Set an entery in the receive address list.
3370  */
3371 static void
3372 wm_set_ral(struct wm_softc *sc, const uint8_t *enaddr, int idx)
3373 {
3374 	uint32_t ral_lo, ral_hi, addrl, addrh;
3375 	uint32_t wlock_mac;
3376 	int rv;
3377 
3378 	if (enaddr != NULL) {
3379 		ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) |
3380 		    (enaddr[3] << 24);
3381 		ral_hi = enaddr[4] | (enaddr[5] << 8);
3382 		ral_hi |= RAL_AV;
3383 	} else {
3384 		ral_lo = 0;
3385 		ral_hi = 0;
3386 	}
3387 
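	/*
	 * Illustrative example: for 00:11:22:33:44:55 the packing above
	 * yields ral_lo = 0x33221100 and ral_hi = RAL_AV | 0x5544.
	 */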
3388 	switch (sc->sc_type) {
3389 	case WM_T_82542_2_0:
3390 	case WM_T_82542_2_1:
3391 	case WM_T_82543:
3392 		CSR_WRITE(sc, WMREG_RAL(idx), ral_lo);
3393 		CSR_WRITE_FLUSH(sc);
3394 		CSR_WRITE(sc, WMREG_RAH(idx), ral_hi);
3395 		CSR_WRITE_FLUSH(sc);
3396 		break;
3397 	case WM_T_PCH2:
3398 	case WM_T_PCH_LPT:
3399 	case WM_T_PCH_SPT:
3400 	case WM_T_PCH_CNP:
3401 		if (idx == 0) {
3402 			CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3403 			CSR_WRITE_FLUSH(sc);
3404 			CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3405 			CSR_WRITE_FLUSH(sc);
3406 			return;
3407 		}
3408 		if (sc->sc_type != WM_T_PCH2) {
3409 			wlock_mac = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM),
3410 			    FWSM_WLOCK_MAC);
3411 			addrl = WMREG_SHRAL(idx - 1);
3412 			addrh = WMREG_SHRAH(idx - 1);
3413 		} else {
3414 			wlock_mac = 0;
3415 			addrl = WMREG_PCH_LPT_SHRAL(idx - 1);
3416 			addrh = WMREG_PCH_LPT_SHRAH(idx - 1);
3417 		}
3418 
3419 		if ((wlock_mac == 0) || (idx <= wlock_mac)) {
3420 			rv = wm_get_swflag_ich8lan(sc);
3421 			if (rv != 0)
3422 				return;
3423 			CSR_WRITE(sc, addrl, ral_lo);
3424 			CSR_WRITE_FLUSH(sc);
3425 			CSR_WRITE(sc, addrh, ral_hi);
3426 			CSR_WRITE_FLUSH(sc);
3427 			wm_put_swflag_ich8lan(sc);
3428 		}
3429 
3430 		break;
3431 	default:
3432 		CSR_WRITE(sc, WMREG_CORDOVA_RAL(idx), ral_lo);
3433 		CSR_WRITE_FLUSH(sc);
3434 		CSR_WRITE(sc, WMREG_CORDOVA_RAH(idx), ral_hi);
3435 		CSR_WRITE_FLUSH(sc);
3436 		break;
3437 	}
3438 }
3439 
3440 /*
3441  * wm_mchash:
3442  *
3443  *	Compute the hash of the multicast address for the 4096-bit
3444  *	multicast filter (1024-bit on ICH/PCH variants).
3445  */
3446 static uint32_t
3447 wm_mchash(struct wm_softc *sc, const uint8_t *enaddr)
3448 {
3449 	static const int lo_shift[4] = { 4, 3, 2, 0 };
3450 	static const int hi_shift[4] = { 4, 5, 6, 8 };
3451 	static const int ich8_lo_shift[4] = { 6, 5, 4, 2 };
3452 	static const int ich8_hi_shift[4] = { 2, 3, 4, 6 };
3453 	uint32_t hash;
3454 
3455 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3456 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3457 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3458 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
3459 		hash = (enaddr[4] >> ich8_lo_shift[sc->sc_mchash_type]) |
3460 		    (((uint16_t) enaddr[5]) << ich8_hi_shift[sc->sc_mchash_type]);
3461 		return (hash & 0x3ff);
3462 	}
3463 	hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) |
3464 	    (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]);
3465 
3466 	return (hash & 0xfff);
3467 }
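
/*
 * Worked example (illustrative): with filter type 0 on a non-ICH/PCH
 * chip, an address ending in ...:44:55 hashes to
 * (0x44 >> 4) | (0x55 << 4) = 0x554 (12 bits).  wm_set_filter() then
 * sets bit (hash & 0x1f) = 0x14 in MTA register (hash >> 5) = 0x2a.
 */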
3468 
3469 /*
3470  * wm_set_filter:
3471  *
3472  *	Set up the receive filter.
3473  */
3474 static void
3475 wm_set_filter(struct wm_softc *sc)
3476 {
3477 	struct ethercom *ec = &sc->sc_ethercom;
3478 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
3479 	struct ether_multi *enm;
3480 	struct ether_multistep step;
3481 	bus_addr_t mta_reg;
3482 	uint32_t hash, reg, bit;
3483 	int i, size, ralmax;
3484 
3485 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3486 		device_xname(sc->sc_dev), __func__));
3487 
3488 	if (sc->sc_type >= WM_T_82544)
3489 		mta_reg = WMREG_CORDOVA_MTA;
3490 	else
3491 		mta_reg = WMREG_MTA;
3492 
3493 	sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE);
3494 
3495 	if (ifp->if_flags & IFF_BROADCAST)
3496 		sc->sc_rctl |= RCTL_BAM;
3497 	if (ifp->if_flags & IFF_PROMISC) {
3498 		sc->sc_rctl |= RCTL_UPE;
3499 		goto allmulti;
3500 	}
3501 
3502 	/*
3503 	 * Set the station address in the first RAL slot, and
3504 	 * clear the remaining slots.
3505 	 */
3506 	if (sc->sc_type == WM_T_ICH8)
3507 		size = WM_RAL_TABSIZE_ICH8 - 1;
3508 	else if ((sc->sc_type == WM_T_ICH9) || (sc->sc_type == WM_T_ICH10)
3509 	    || (sc->sc_type == WM_T_PCH))
3510 		size = WM_RAL_TABSIZE_ICH8;
3511 	else if (sc->sc_type == WM_T_PCH2)
3512 		size = WM_RAL_TABSIZE_PCH2;
3513 	else if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3514 	    || (sc->sc_type == WM_T_PCH_CNP))
3515 		size = WM_RAL_TABSIZE_PCH_LPT;
3516 	else if (sc->sc_type == WM_T_82575)
3517 		size = WM_RAL_TABSIZE_82575;
3518 	else if ((sc->sc_type == WM_T_82576) || (sc->sc_type == WM_T_82580))
3519 		size = WM_RAL_TABSIZE_82576;
3520 	else if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
3521 		size = WM_RAL_TABSIZE_I350;
3522 	else
3523 		size = WM_RAL_TABSIZE;
3524 	wm_set_ral(sc, CLLADDR(ifp->if_sadl), 0);
3525 
3526 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
3527 	    || (sc->sc_type == WM_T_PCH_CNP)) {
3528 		i = __SHIFTOUT(CSR_READ(sc, WMREG_FWSM), FWSM_WLOCK_MAC);
3529 		switch (i) {
3530 		case 0:
3531 			/* We can use all entries */
3532 			ralmax = size;
3533 			break;
3534 		case 1:
3535 			/* Only RAR[0] */
3536 			ralmax = 1;
3537 			break;
3538 		default:
3539 			/* available SHRA + RAR[0] */
3540 			ralmax = i + 1;
3541 		}
3542 	} else
3543 		ralmax = size;
3544 	for (i = 1; i < size; i++) {
3545 		if (i < ralmax)
3546 			wm_set_ral(sc, NULL, i);
3547 	}
3548 
3549 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3550 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3551 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
3552 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP))
3553 		size = WM_ICH8_MC_TABSIZE;
3554 	else
3555 		size = WM_MC_TABSIZE;
3556 	/* Clear out the multicast table. */
3557 	for (i = 0; i < size; i++) {
3558 		CSR_WRITE(sc, mta_reg + (i << 2), 0);
3559 		CSR_WRITE_FLUSH(sc);
3560 	}
3561 
3562 	ETHER_LOCK(ec);
3563 	ETHER_FIRST_MULTI(step, ec, enm);
3564 	while (enm != NULL) {
3565 		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
3566 			ETHER_UNLOCK(ec);
3567 			/*
3568 			 * We must listen to a range of multicast addresses.
3569 			 * For now, just accept all multicasts, rather than
3570 			 * trying to set only those filter bits needed to match
3571 			 * the range.  (At this time, the only use of address
3572 			 * ranges is for IP multicast routing, for which the
3573 			 * range is big enough to require all bits set.)
3574 			 */
3575 			goto allmulti;
3576 		}
3577 
3578 		hash = wm_mchash(sc, enm->enm_addrlo);
3579 
3580 		reg = (hash >> 5);
3581 		if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
3582 		    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
3583 		    || (sc->sc_type == WM_T_PCH2)
3584 		    || (sc->sc_type == WM_T_PCH_LPT)
3585 		    || (sc->sc_type == WM_T_PCH_SPT)
3586 		    || (sc->sc_type == WM_T_PCH_CNP))
3587 			reg &= 0x1f;
3588 		else
3589 			reg &= 0x7f;
3590 		bit = hash & 0x1f;
3591 
3592 		hash = CSR_READ(sc, mta_reg + (reg << 2));
3593 		hash |= 1U << bit;
3594 
3595 		if (sc->sc_type == WM_T_82544 && (reg & 1) != 0) {
3596 			/*
3597 			 * 82544 Errata 9: Certain registers (FCAH, MTA and
3598 			 * VFTA) cannot be written with particular alignments
3599 			 * in PCI-X operation; also rewrite the previous word.
3600 			 */
3601 			bit = CSR_READ(sc, mta_reg + ((reg - 1) << 2));
3602 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3603 			CSR_WRITE_FLUSH(sc);
3604 			CSR_WRITE(sc, mta_reg + ((reg - 1) << 2), bit);
3605 			CSR_WRITE_FLUSH(sc);
3606 		} else {
3607 			CSR_WRITE(sc, mta_reg + (reg << 2), hash);
3608 			CSR_WRITE_FLUSH(sc);
3609 		}
3610 
3611 		ETHER_NEXT_MULTI(step, enm);
3612 	}
3613 	ETHER_UNLOCK(ec);
3614 
3615 	ifp->if_flags &= ~IFF_ALLMULTI;
3616 	goto setit;
3617 
3618  allmulti:
3619 	ifp->if_flags |= IFF_ALLMULTI;
3620 	sc->sc_rctl |= RCTL_MPE;
3621 
3622  setit:
3623 	CSR_WRITE(sc, WMREG_RCTL, sc->sc_rctl);
3624 }
3625 
3626 /* Reset and init related */
3627 
3628 static void
3629 wm_set_vlan(struct wm_softc *sc)
3630 {
3631 
3632 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3633 		device_xname(sc->sc_dev), __func__));
3634 
3635 	/* Deal with VLAN enables. */
3636 	if (VLAN_ATTACHED(&sc->sc_ethercom))
3637 		sc->sc_ctrl |= CTRL_VME;
3638 	else
3639 		sc->sc_ctrl &= ~CTRL_VME;
3640 
3641 	/* Write the control registers. */
3642 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
3643 }
3644 
3645 static void
3646 wm_set_pcie_completion_timeout(struct wm_softc *sc)
3647 {
3648 	uint32_t gcr;
3649 	pcireg_t ctrl2;
3650 
3651 	gcr = CSR_READ(sc, WMREG_GCR);
3652 
3653 	/* Only take action if timeout value is defaulted to 0 */
3654 	if ((gcr & GCR_CMPL_TMOUT_MASK) != 0)
3655 		goto out;
3656 
3657 	if ((gcr & GCR_CAP_VER2) == 0) {
3658 		gcr |= GCR_CMPL_TMOUT_10MS;
3659 		goto out;
3660 	}
3661 
3662 	ctrl2 = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
3663 	    sc->sc_pcixe_capoff + PCIE_DCSR2);
3664 	ctrl2 |= WM_PCIE_DCSR2_16MS;
3665 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
3666 	    sc->sc_pcixe_capoff + PCIE_DCSR2, ctrl2);
3667 
3668 out:
3669 	/* Disable completion timeout resend */
3670 	gcr &= ~GCR_CMPL_TMOUT_RESEND;
3671 
3672 	CSR_WRITE(sc, WMREG_GCR, gcr);
3673 }
3674 
3675 void
3676 wm_get_auto_rd_done(struct wm_softc *sc)
3677 {
3678 	int i;
3679 
3680 	/* wait for eeprom to reload */
3681 	switch (sc->sc_type) {
3682 	case WM_T_82571:
3683 	case WM_T_82572:
3684 	case WM_T_82573:
3685 	case WM_T_82574:
3686 	case WM_T_82583:
3687 	case WM_T_82575:
3688 	case WM_T_82576:
3689 	case WM_T_82580:
3690 	case WM_T_I350:
3691 	case WM_T_I354:
3692 	case WM_T_I210:
3693 	case WM_T_I211:
3694 	case WM_T_80003:
3695 	case WM_T_ICH8:
3696 	case WM_T_ICH9:
3697 		for (i = 0; i < 10; i++) {
3698 			if (CSR_READ(sc, WMREG_EECD) & EECD_EE_AUTORD)
3699 				break;
3700 			delay(1000);
3701 		}
3702 		if (i == 10) {
3703 			log(LOG_ERR, "%s: auto read from eeprom failed to "
3704 			    "complete\n", device_xname(sc->sc_dev));
3705 		}
3706 		break;
3707 	default:
3708 		break;
3709 	}
3710 }
3711 
3712 void
3713 wm_lan_init_done(struct wm_softc *sc)
3714 {
3715 	uint32_t reg = 0;
3716 	int i;
3717 
3718 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3719 		device_xname(sc->sc_dev), __func__));
3720 
3721 	/* Wait for eeprom to reload */
3722 	switch (sc->sc_type) {
3723 	case WM_T_ICH10:
3724 	case WM_T_PCH:
3725 	case WM_T_PCH2:
3726 	case WM_T_PCH_LPT:
3727 	case WM_T_PCH_SPT:
3728 	case WM_T_PCH_CNP:
3729 		for (i = 0; i < WM_ICH8_LAN_INIT_TIMEOUT; i++) {
3730 			reg = CSR_READ(sc, WMREG_STATUS);
3731 			if ((reg & STATUS_LAN_INIT_DONE) != 0)
3732 				break;
3733 			delay(100);
3734 		}
3735 		if (i >= WM_ICH8_LAN_INIT_TIMEOUT) {
3736 			log(LOG_ERR, "%s: %s: lan_init_done failed to "
3737 			    "complete\n", device_xname(sc->sc_dev), __func__);
3738 		}
3739 		break;
3740 	default:
3741 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3742 		    __func__);
3743 		break;
3744 	}
3745 
3746 	reg &= ~STATUS_LAN_INIT_DONE;
3747 	CSR_WRITE(sc, WMREG_STATUS, reg);
3748 }
3749 
3750 void
3751 wm_get_cfg_done(struct wm_softc *sc)
3752 {
3753 	int mask;
3754 	uint32_t reg;
3755 	int i;
3756 
3757 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3758 		device_xname(sc->sc_dev), __func__));
3759 
3760 	/* Wait for eeprom to reload */
3761 	switch (sc->sc_type) {
3762 	case WM_T_82542_2_0:
3763 	case WM_T_82542_2_1:
3764 		/* null */
3765 		break;
3766 	case WM_T_82543:
3767 	case WM_T_82544:
3768 	case WM_T_82540:
3769 	case WM_T_82545:
3770 	case WM_T_82545_3:
3771 	case WM_T_82546:
3772 	case WM_T_82546_3:
3773 	case WM_T_82541:
3774 	case WM_T_82541_2:
3775 	case WM_T_82547:
3776 	case WM_T_82547_2:
3777 	case WM_T_82573:
3778 	case WM_T_82574:
3779 	case WM_T_82583:
3780 		/* generic */
3781 		delay(10*1000);
3782 		break;
3783 	case WM_T_80003:
3784 	case WM_T_82571:
3785 	case WM_T_82572:
3786 	case WM_T_82575:
3787 	case WM_T_82576:
3788 	case WM_T_82580:
3789 	case WM_T_I350:
3790 	case WM_T_I354:
3791 	case WM_T_I210:
3792 	case WM_T_I211:
3793 		if (sc->sc_type == WM_T_82571) {
3794 			/* Only 82571 shares port 0 */
3795 			mask = EEMNGCTL_CFGDONE_0;
3796 		} else
3797 			mask = EEMNGCTL_CFGDONE_0 << sc->sc_funcid;
3798 		for (i = 0; i < WM_PHY_CFG_TIMEOUT; i++) {
3799 			if (CSR_READ(sc, WMREG_EEMNGCTL) & mask)
3800 				break;
3801 			delay(1000);
3802 		}
3803 		if (i >= WM_PHY_CFG_TIMEOUT) {
3804 			DPRINTF(WM_DEBUG_GMII, ("%s: %s failed\n",
3805 				device_xname(sc->sc_dev), __func__));
3806 		}
3807 		break;
3808 	case WM_T_ICH8:
3809 	case WM_T_ICH9:
3810 	case WM_T_ICH10:
3811 	case WM_T_PCH:
3812 	case WM_T_PCH2:
3813 	case WM_T_PCH_LPT:
3814 	case WM_T_PCH_SPT:
3815 	case WM_T_PCH_CNP:
3816 		delay(10*1000);
3817 		if (sc->sc_type >= WM_T_ICH10)
3818 			wm_lan_init_done(sc);
3819 		else
3820 			wm_get_auto_rd_done(sc);
3821 
3822 		reg = CSR_READ(sc, WMREG_STATUS);
3823 		if ((reg & STATUS_PHYRA) != 0)
3824 			CSR_WRITE(sc, WMREG_STATUS, reg & ~STATUS_PHYRA);
3825 		break;
3826 	default:
3827 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
3828 		    __func__);
3829 		break;
3830 	}
3831 }
3832 
3833 void
3834 wm_phy_post_reset(struct wm_softc *sc)
3835 {
3836 	uint32_t reg;
3837 
3838 	/* This function is only for ICH8 and newer. */
3839 	if (sc->sc_type < WM_T_ICH8)
3840 		return;
3841 
3842 	if (wm_phy_resetisblocked(sc)) {
3843 		/* XXX */
3844 		device_printf(sc->sc_dev, "PHY is blocked\n");
3845 		return;
3846 	}
3847 
3848 	/* Allow time for h/w to get to quiescent state after reset */
3849 	delay(10*1000);
3850 
3851 	/* Perform any necessary post-reset workarounds */
3852 	if (sc->sc_type == WM_T_PCH)
3853 		wm_hv_phy_workaround_ich8lan(sc);
3854 	if (sc->sc_type == WM_T_PCH2)
3855 		wm_lv_phy_workaround_ich8lan(sc);
3856 
3857 	/* Clear the host wakeup bit after lcd reset */
3858 	if (sc->sc_type >= WM_T_PCH) {
3859 		reg = wm_gmii_hv_readreg(sc->sc_dev, 2,
3860 		    BM_PORT_GEN_CFG);
3861 		reg &= ~BM_WUC_HOST_WU_BIT;
3862 		wm_gmii_hv_writereg(sc->sc_dev, 2,
3863 		    BM_PORT_GEN_CFG, reg);
3864 	}
3865 
3866 	/* Configure the LCD with the extended configuration region in NVM */
3867 	wm_init_lcd_from_nvm(sc);
3868 
3869 	/* Configure the LCD with the OEM bits in NVM */
3870 }
3871 
3872 /* Only for PCH and newer */
3873 static void
3874 wm_write_smbus_addr(struct wm_softc *sc)
3875 {
3876 	uint32_t strap, freq;
3877 	uint32_t phy_data;
3878 
3879 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3880 		device_xname(sc->sc_dev), __func__));
3881 
3882 	strap = CSR_READ(sc, WMREG_STRAP);
3883 	freq = __SHIFTOUT(strap, STRAP_FREQ);
3884 
3885 	phy_data = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_SMB_ADDR);
3886 
3887 	phy_data &= ~HV_SMB_ADDR_ADDR;
3888 	phy_data |= __SHIFTOUT(strap, STRAP_SMBUSADDR);
3889 	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
3890 
3891 	if (sc->sc_phytype == WMPHY_I217) {
3892 		/* Restore SMBus frequency */
3893 		if (freq--) {
3894 			phy_data &= ~(HV_SMB_ADDR_FREQ_LOW
3895 			    | HV_SMB_ADDR_FREQ_HIGH);
3896 			phy_data |= __SHIFTIN((freq & 0x01) != 0,
3897 			    HV_SMB_ADDR_FREQ_LOW);
3898 			phy_data |= __SHIFTIN((freq & 0x02) != 0,
3899 			    HV_SMB_ADDR_FREQ_HIGH);
3900 		} else {
3901 			DPRINTF(WM_DEBUG_INIT,
3902 			    ("%s: %s Unsupported SMB frequency in PHY\n",
3903 				device_xname(sc->sc_dev), __func__));
3904 		}
3905 	}
3906 
3907 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_SMB_ADDR, phy_data);
3908 }
3909 
3910 void
3911 wm_init_lcd_from_nvm(struct wm_softc *sc)
3912 {
3913 	uint32_t extcnfctr, sw_cfg_mask, cnf_size, word_addr, i, reg;
3914 	uint16_t phy_page = 0;
3915 
3916 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
3917 		device_xname(sc->sc_dev), __func__));
3918 
3919 	switch (sc->sc_type) {
3920 	case WM_T_ICH8:
3921 		if ((sc->sc_phytype == WMPHY_UNKNOWN)
3922 		    || (sc->sc_phytype != WMPHY_IGP_3))
3923 			return;
3924 
3925 		if ((sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_AMT)
3926 		    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_82801H_LAN)) {
3927 			sw_cfg_mask = FEXTNVM_SW_CONFIG;
3928 			break;
3929 		}
3930 		/* FALLTHROUGH */
3931 	case WM_T_PCH:
3932 	case WM_T_PCH2:
3933 	case WM_T_PCH_LPT:
3934 	case WM_T_PCH_SPT:
3935 	case WM_T_PCH_CNP:
3936 		sw_cfg_mask = FEXTNVM_SW_CONFIG_ICH8M;
3937 		break;
3938 	default:
3939 		return;
3940 	}
3941 
3942 	sc->phy.acquire(sc);
3943 
3944 	reg = CSR_READ(sc, WMREG_FEXTNVM);
3945 	if ((reg & sw_cfg_mask) == 0)
3946 		goto release;
3947 
3948 	/*
3949 	 * Make sure HW does not configure LCD from PHY extended configuration
3950 	 * before SW configuration
3951 	 */
3952 	extcnfctr = CSR_READ(sc, WMREG_EXTCNFCTR);
3953 	if ((sc->sc_type < WM_T_PCH2)
3954 	    && ((extcnfctr & EXTCNFCTR_PCIE_WRITE_ENABLE) != 0))
3955 		goto release;
3956 
3957 	DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure LCD by software\n",
3958 		device_xname(sc->sc_dev), __func__));
3959 	/* word_addr is in DWORD */
3960 	word_addr = __SHIFTOUT(extcnfctr, EXTCNFCTR_EXT_CNF_POINTER) << 1;
3961 
3962 	reg = CSR_READ(sc, WMREG_EXTCNFSIZE);
3963 	cnf_size = __SHIFTOUT(reg, EXTCNFSIZE_LENGTH);
3964 	if (cnf_size == 0)
3965 		goto release;
3966 
3967 	if (((sc->sc_type == WM_T_PCH)
3968 		&& ((extcnfctr & EXTCNFCTR_OEM_WRITE_ENABLE) == 0))
3969 	    || (sc->sc_type > WM_T_PCH)) {
3970 		/*
3971 		 * HW configures the SMBus address and LEDs when the OEM and
3972 		 * LCD Write Enable bits are set in the NVM. When both NVM bits
3973 		 * are cleared, SW will configure them instead.
3974 		 */
3975 		DPRINTF(WM_DEBUG_INIT, ("%s: %s: Configure SMBus and LED\n",
3976 			device_xname(sc->sc_dev), __func__));
3977 		wm_write_smbus_addr(sc);
3978 
3979 		reg = CSR_READ(sc, WMREG_LEDCTL);
3980 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, HV_LED_CONFIG, reg);
3981 	}
3982 
3983 	/* Configure LCD from extended configuration region. */
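	/*
	 * Each entry in the region is a pair of NVM words: a data word
	 * followed by the target PHY register address (see the two reads
	 * below).  MII_IGPHY_PAGE_SELECT entries update phy_page, which is
	 * folded into the addresses of subsequent entries.
	 */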
3984 	for (i = 0; i < cnf_size; i++) {
3985 		uint16_t reg_data, reg_addr;
3986 
3987 		if (wm_nvm_read(sc, (word_addr + i * 2), 1, &reg_data) != 0)
3988 			goto release;
3989 
3990 		if (wm_nvm_read(sc, (word_addr + i * 2 + 1), 1, &reg_addr) != 0)
3991 			goto release;
3992 
3993 		if (reg_addr == MII_IGPHY_PAGE_SELECT)
3994 			phy_page = reg_data;
3995 
3996 		reg_addr &= IGPHY_MAXREGADDR;
3997 		reg_addr |= phy_page;
3998 
3999 		sc->phy.release(sc); /* XXX */
4000 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, reg_addr, reg_data);
4001 		sc->phy.acquire(sc); /* XXX */
4002 	}
4003 
4004 release:
4005 	sc->phy.release(sc);
4006 	return;
4007 }
4008 
4009 
4010 /* Init hardware bits */
4011 void
4012 wm_initialize_hardware_bits(struct wm_softc *sc)
4013 {
4014 	uint32_t tarc0, tarc1, reg;
4015 
4016 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4017 		device_xname(sc->sc_dev), __func__));
4018 
4019 	/* For 82571 variant, 80003 and ICHs */
4020 	if (((sc->sc_type >= WM_T_82571) && (sc->sc_type <= WM_T_82583))
4021 	    || (sc->sc_type >= WM_T_80003)) {
4022 
4023 		/* Transmit Descriptor Control 0 */
4024 		reg = CSR_READ(sc, WMREG_TXDCTL(0));
4025 		reg |= TXDCTL_COUNT_DESC;
4026 		CSR_WRITE(sc, WMREG_TXDCTL(0), reg);
4027 
4028 		/* Transmit Descriptor Control 1 */
4029 		reg = CSR_READ(sc, WMREG_TXDCTL(1));
4030 		reg |= TXDCTL_COUNT_DESC;
4031 		CSR_WRITE(sc, WMREG_TXDCTL(1), reg);
4032 
4033 		/* TARC0 */
4034 		tarc0 = CSR_READ(sc, WMREG_TARC0);
4035 		switch (sc->sc_type) {
4036 		case WM_T_82571:
4037 		case WM_T_82572:
4038 		case WM_T_82573:
4039 		case WM_T_82574:
4040 		case WM_T_82583:
4041 		case WM_T_80003:
4042 			/* Clear bits 30..27 */
4043 			tarc0 &= ~__BITS(30, 27);
4044 			break;
4045 		default:
4046 			break;
4047 		}
4048 
4049 		switch (sc->sc_type) {
4050 		case WM_T_82571:
4051 		case WM_T_82572:
4052 			tarc0 |= __BITS(26, 23); /* TARC0 bits 23-26 */
4053 
4054 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4055 			tarc1 &= ~__BITS(30, 29); /* Clear bits 30 and 29 */
4056 			tarc1 |= __BITS(26, 24); /* TARC1 bits 26-24 */
4057 			/* 8257[12] Errata No.7 */
4058 			tarc1 |= __BIT(22); /* TARC1 bit 22 */
4059 
4060 			/* TARC1 bit 28 */
4061 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4062 				tarc1 &= ~__BIT(28);
4063 			else
4064 				tarc1 |= __BIT(28);
4065 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4066 
4067 			/*
4068 			 * 8257[12] Errata No.13
4069 			 * Disable Dynamic Clock Gating.
4070 			 */
4071 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4072 			reg &= ~CTRL_EXT_DMA_DYN_CLK;
4073 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4074 			break;
4075 		case WM_T_82573:
4076 		case WM_T_82574:
4077 		case WM_T_82583:
4078 			if ((sc->sc_type == WM_T_82574)
4079 			    || (sc->sc_type == WM_T_82583))
4080 				tarc0 |= __BIT(26); /* TARC0 bit 26 */
4081 
4082 			/* Extended Device Control */
4083 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4084 			reg &= ~__BIT(23);	/* Clear bit 23 */
4085 			reg |= __BIT(22);	/* Set bit 22 */
4086 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4087 
4088 			/* Device Control */
4089 			sc->sc_ctrl &= ~__BIT(29);	/* Clear bit 29 */
4090 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4091 
4092 			/* PCIe Control Register */
4093 			/*
4094 			 * 82573 Errata (unknown).
4095 			 *
4096 			 * 82574 Errata 25 and 82583 Errata 12
4097 			 * "Dropped Rx Packets":
4098 			 *   NVM image version 2.1.4 and newer is not affected.
4099 			 */
4100 			reg = CSR_READ(sc, WMREG_GCR);
4101 			reg |= GCR_L1_ACT_WITHOUT_L0S_RX;
4102 			CSR_WRITE(sc, WMREG_GCR, reg);
4103 
4104 			if ((sc->sc_type == WM_T_82574)
4105 			    || (sc->sc_type == WM_T_82583)) {
4106 				/*
4107 				 * Document says this bit must be set for
4108 				 * proper operation.
4109 				 */
4110 				reg = CSR_READ(sc, WMREG_GCR);
4111 				reg |= __BIT(22);
4112 				CSR_WRITE(sc, WMREG_GCR, reg);
4113 
4114 				/*
4115 				 * Apply a workaround for a documented
4116 				 * hardware erratum: some error-prone or
4117 				 * unreliable PCIe completions can occur,
4118 				 * particularly with ASPM enabled. Without
4119 				 * this fix, the issue can cause Tx
4120 				 * timeouts.
4121 				 */
4122 				reg = CSR_READ(sc, WMREG_GCR2);
4123 				reg |= __BIT(0);
4124 				CSR_WRITE(sc, WMREG_GCR2, reg);
4125 			}
4126 			break;
4127 		case WM_T_80003:
4128 			/* TARC0 */
4129 			if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
4130 			    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
4131 				tarc0 &= ~__BIT(20); /* Clear bit 20 */
4132 
4133 			/* TARC1 bit 28 */
4134 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4135 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4136 				tarc1 &= ~__BIT(28);
4137 			else
4138 				tarc1 |= __BIT(28);
4139 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4140 			break;
4141 		case WM_T_ICH8:
4142 		case WM_T_ICH9:
4143 		case WM_T_ICH10:
4144 		case WM_T_PCH:
4145 		case WM_T_PCH2:
4146 		case WM_T_PCH_LPT:
4147 		case WM_T_PCH_SPT:
4148 		case WM_T_PCH_CNP:
4149 			/* TARC0 */
4150 			if (sc->sc_type == WM_T_ICH8) {
4151 				/* Set TARC0 bits 29 and 28 */
4152 				tarc0 |= __BITS(29, 28);
4153 			} else if (sc->sc_type == WM_T_PCH_SPT) {
4154 				tarc0 |= __BIT(29);
4155 				/*
4156 				 * Drop bit 28 (from Linux).
4157 				 * See I218/I219 spec update
4158 				 * "5. Buffer Overrun While the I219 is
4159 				 * Processing DMA Transactions"
4160 				 */
4161 				tarc0 &= ~__BIT(28);
4162 			}
4163 			/* Set TARC0 bits 23,24,26,27 */
4164 			tarc0 |= __BITS(27, 26) | __BITS(24, 23);
4165 
4166 			/* CTRL_EXT */
4167 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
4168 			reg |= __BIT(22);	/* Set bit 22 */
4169 			/*
4170 			 * Enable PHY low-power state when MAC is at D3
4171 			 * w/o WoL
4172 			 */
4173 			if (sc->sc_type >= WM_T_PCH)
4174 				reg |= CTRL_EXT_PHYPDEN;
4175 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4176 
4177 			/* TARC1 */
4178 			tarc1 = CSR_READ(sc, WMREG_TARC1);
4179 			/* bit 28 */
4180 			if ((CSR_READ(sc, WMREG_TCTL) & TCTL_MULR) != 0)
4181 				tarc1 &= ~__BIT(28);
4182 			else
4183 				tarc1 |= __BIT(28);
4184 			tarc1 |= __BIT(24) | __BIT(26) | __BIT(30);
4185 			CSR_WRITE(sc, WMREG_TARC1, tarc1);
4186 
4187 			/* Device Status */
4188 			if (sc->sc_type == WM_T_ICH8) {
4189 				reg = CSR_READ(sc, WMREG_STATUS);
4190 				reg &= ~__BIT(31);
4191 				CSR_WRITE(sc, WMREG_STATUS, reg);
4192 
4193 			}
4194 
4195 			/* IOSFPC */
4196 			if (sc->sc_type == WM_T_PCH_SPT) {
4197 				reg = CSR_READ(sc, WMREG_IOSFPC);
4198 				reg |= RCTL_RDMTS_HEX; /* XXX RTCL bit? */
4199 				CSR_WRITE(sc, WMREG_IOSFPC, reg);
4200 			}
4201 			/*
4202 			 * To work around a descriptor data corruption issue
4203 			 * with NFSv2 UDP traffic, simply disable the NFS
4204 			 * filtering capability.
4205 			 */
4206 			reg = CSR_READ(sc, WMREG_RFCTL);
4207 			reg |= WMREG_RFCTL_NFSWDIS | WMREG_RFCTL_NFSRDIS;
4208 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4209 			break;
4210 		default:
4211 			break;
4212 		}
4213 		CSR_WRITE(sc, WMREG_TARC0, tarc0);
4214 
4215 		switch (sc->sc_type) {
4216 		/*
4217 		 * 8257[12] Errata No.52, 82573 Errata No.43 and some others.
4218 		 * Avoid RSS Hash Value bug.
4219 		 */
4220 		case WM_T_82571:
4221 		case WM_T_82572:
4222 		case WM_T_82573:
4223 		case WM_T_80003:
4224 		case WM_T_ICH8:
4225 			reg = CSR_READ(sc, WMREG_RFCTL);
4226 			reg |= WMREG_RFCTL_NEWIPV6EXDIS |WMREG_RFCTL_IPV6EXDIS;
4227 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4228 			break;
4229 		case WM_T_82574:
4230 			/* Use extended Rx descriptors. */
4231 			reg = CSR_READ(sc, WMREG_RFCTL);
4232 			reg |= WMREG_RFCTL_EXSTEN;
4233 			CSR_WRITE(sc, WMREG_RFCTL, reg);
4234 			break;
4235 		default:
4236 			break;
4237 		}
4238 	} else if ((sc->sc_type >= WM_T_82575) && (sc->sc_type <= WM_T_I211)) {
4239 		/*
4240 		 * 82575 Errata XXX, 82576 Errata 46, 82580 Errata 24,
4241 		 * I350 Errata 37, I210 Errata No. 31 and I211 Errata No. 11:
4242 		 * "Certain Malformed IPv6 Extension Headers are Not Processed
4243 		 * Correctly by the Device"
4244 		 *
4245 		 * I354(C2000) Errata AVR53:
4246 		 * "Malformed IPv6 Extension Headers May Result in LAN Device
4247 		 * Hang"
4248 		 */
4249 		reg = CSR_READ(sc, WMREG_RFCTL);
4250 		reg |= WMREG_RFCTL_IPV6EXDIS;
4251 		CSR_WRITE(sc, WMREG_RFCTL, reg);
4252 	}
4253 }
4254 
4255 static uint32_t
4256 wm_rxpbs_adjust_82580(uint32_t val)
4257 {
4258 	uint32_t rv = 0;
4259 
4260 	if (val < __arraycount(wm_82580_rxpbs_table))
4261 		rv = wm_82580_rxpbs_table[val];
4262 
4263 	return rv;
4264 }
4265 
4266 /*
4267  * wm_reset_phy:
4268  *
4269  *	generic PHY reset function.
4270  *	Same as e1000_phy_hw_reset_generic()
4271  */
4272 static void
4273 wm_reset_phy(struct wm_softc *sc)
4274 {
4275 	uint32_t reg;
4276 
4277 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4278 		device_xname(sc->sc_dev), __func__));
4279 	if (wm_phy_resetisblocked(sc))
4280 		return;
4281 
4282 	sc->phy.acquire(sc);
4283 
4284 	reg = CSR_READ(sc, WMREG_CTRL);
4285 	CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
4286 	CSR_WRITE_FLUSH(sc);
4287 
4288 	delay(sc->phy.reset_delay_us);
4289 
4290 	CSR_WRITE(sc, WMREG_CTRL, reg);
4291 	CSR_WRITE_FLUSH(sc);
4292 
4293 	delay(150);
4294 
4295 	sc->phy.release(sc);
4296 
4297 	wm_get_cfg_done(sc);
4298 	wm_phy_post_reset(sc);
4299 }
4300 
4301 /*
4302  * Only used by WM_T_PCH_SPT which does not use multiqueue,
4303  * so it is enough to check sc->sc_queue[0] only.
4304  */
4305 static void
4306 wm_flush_desc_rings(struct wm_softc *sc)
4307 {
4308 	pcireg_t preg;
4309 	uint32_t reg;
4310 	struct wm_txqueue *txq;
4311 	wiseman_txdesc_t *txd;
4312 	int nexttx;
4313 	uint32_t rctl;
4314 
4315 	/* First, disable MULR fix in FEXTNVM11 */
4316 	reg = CSR_READ(sc, WMREG_FEXTNVM11);
4317 	reg |= FEXTNVM11_DIS_MULRFIX;
4318 	CSR_WRITE(sc, WMREG_FEXTNVM11, reg);
4319 
4320 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4321 	reg = CSR_READ(sc, WMREG_TDLEN(0));
4322 	if (((preg & DESCRING_STATUS_FLUSH_REQ) == 0) || (reg == 0))
4323 		return;
4324 
4325 	/* TX */
4326 	printf("%s: Need TX flush (reg = %08x, len = %u)\n",
4327 	    device_xname(sc->sc_dev), preg, reg);
4328 	reg = CSR_READ(sc, WMREG_TCTL);
4329 	CSR_WRITE(sc, WMREG_TCTL, reg | TCTL_EN);
4330 
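	/*
	 * Queue a single dummy 512-byte legacy descriptor on queue 0 and
	 * advance the tail pointer, which lets the hardware drain its
	 * internal descriptor cache so the flush request bit can clear.
	 */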
4331 	txq = &sc->sc_queue[0].wmq_txq;
4332 	nexttx = txq->txq_next;
4333 	txd = &txq->txq_descs[nexttx];
4334 	wm_set_dma_addr(&txd->wtx_addr, WM_CDTXADDR(txq, nexttx));
4335 	txd->wtx_cmdlen = htole32(WTX_CMD_IFCS | 512);
4336 	txd->wtx_fields.wtxu_status = 0;
4337 	txd->wtx_fields.wtxu_options = 0;
4338 	txd->wtx_fields.wtxu_vlan = 0;
4339 
4340 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4341 	    BUS_SPACE_BARRIER_WRITE);
4342 
4343 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
4344 	CSR_WRITE(sc, WMREG_TDT(0), txq->txq_next);
4345 	bus_space_barrier(sc->sc_st, sc->sc_sh, 0, 0,
4346 	    BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE);
4347 	delay(250);
4348 
4349 	preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, WM_PCI_DESCRING_STATUS);
4350 	if ((preg & DESCRING_STATUS_FLUSH_REQ) == 0)
4351 		return;
4352 
4353 	/* RX */
4354 	printf("%s: Need RX flush (reg = %08x)\n",
4355 	    device_xname(sc->sc_dev), preg);
4356 	rctl = CSR_READ(sc, WMREG_RCTL);
4357 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4358 	CSR_WRITE_FLUSH(sc);
4359 	delay(150);
4360 
4361 	reg = CSR_READ(sc, WMREG_RXDCTL(0));
4362 	/* zero the lower 14 bits (prefetch and host thresholds) */
4363 	reg &= 0xffffc000;
4364 	/*
4365 	 * update thresholds: prefetch threshold to 31, host threshold
4366 	 * to 1 and make sure the granularity is "descriptors" and not
4367 	 * "cache lines"
4368 	 */
4369 	reg |= (0x1f | (1 << 8) | RXDCTL_GRAN);
4370 	CSR_WRITE(sc, WMREG_RXDCTL(0), reg);
4371 
4372 	/*
4373 	 * momentarily enable the RX ring for the changes to take
4374 	 * effect
4375 	 */
4376 	CSR_WRITE(sc, WMREG_RCTL, rctl | RCTL_EN);
4377 	CSR_WRITE_FLUSH(sc);
4378 	delay(150);
4379 	CSR_WRITE(sc, WMREG_RCTL, rctl & ~RCTL_EN);
4380 }
4381 
4382 /*
4383  * wm_reset:
4384  *
4385  *	Reset the i82542 chip.
4386  */
4387 static void
4388 wm_reset(struct wm_softc *sc)
4389 {
4390 	int phy_reset = 0;
4391 	int i, error = 0;
4392 	uint32_t reg;
4393 	uint16_t kmreg;
4394 	int rv;
4395 
4396 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
4397 		device_xname(sc->sc_dev), __func__));
4398 	KASSERT(sc->sc_type != 0);
4399 
4400 	/*
4401 	 * Allocate on-chip memory according to the MTU size.
4402 	 * The Packet Buffer Allocation register must be written
4403 	 * before the chip is reset.
4404 	 */
4405 	switch (sc->sc_type) {
4406 	case WM_T_82547:
4407 	case WM_T_82547_2:
4408 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4409 		    PBA_22K : PBA_30K;
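		/*
		 * The space above the Rx allocation (up to the 40K total)
		 * becomes the on-chip Tx FIFO; record its base and size for
		 * the 82547 Tx FIFO stall workaround below.
		 */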
4410 		for (i = 0; i < sc->sc_nqueues; i++) {
4411 			struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
4412 			txq->txq_fifo_head = 0;
4413 			txq->txq_fifo_addr = sc->sc_pba << PBA_ADDR_SHIFT;
4414 			txq->txq_fifo_size =
4415 			    (PBA_40K - sc->sc_pba) << PBA_BYTE_SHIFT;
4416 			txq->txq_fifo_stall = 0;
4417 		}
4418 		break;
4419 	case WM_T_82571:
4420 	case WM_T_82572:
4421 	case WM_T_82575:	/* XXX need special handing for jumbo frames */
4422 	case WM_T_80003:
4423 		sc->sc_pba = PBA_32K;
4424 		break;
4425 	case WM_T_82573:
4426 		sc->sc_pba = PBA_12K;
4427 		break;
4428 	case WM_T_82574:
4429 	case WM_T_82583:
4430 		sc->sc_pba = PBA_20K;
4431 		break;
4432 	case WM_T_82576:
4433 		sc->sc_pba = CSR_READ(sc, WMREG_RXPBS);
4434 		sc->sc_pba &= RXPBS_SIZE_MASK_82576;
4435 		break;
4436 	case WM_T_82580:
4437 	case WM_T_I350:
4438 	case WM_T_I354:
4439 		sc->sc_pba = wm_rxpbs_adjust_82580(CSR_READ(sc, WMREG_RXPBS));
4440 		break;
4441 	case WM_T_I210:
4442 	case WM_T_I211:
4443 		sc->sc_pba = PBA_34K;
4444 		break;
4445 	case WM_T_ICH8:
4446 		/* Workaround for a bit corruption issue in FIFO memory */
4447 		sc->sc_pba = PBA_8K;
4448 		CSR_WRITE(sc, WMREG_PBS, PBA_16K);
4449 		break;
4450 	case WM_T_ICH9:
4451 	case WM_T_ICH10:
4452 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 4096 ?
4453 		    PBA_14K : PBA_10K;
4454 		break;
4455 	case WM_T_PCH:
4456 	case WM_T_PCH2:	/* XXX 14K? */
4457 	case WM_T_PCH_LPT:
4458 	case WM_T_PCH_SPT:
4459 	case WM_T_PCH_CNP:
4460 		sc->sc_pba = PBA_26K;
4461 		break;
4462 	default:
4463 		sc->sc_pba = sc->sc_ethercom.ec_if.if_mtu > 8192 ?
4464 		    PBA_40K : PBA_48K;
4465 		break;
4466 	}
4467 	/*
4468 	 * Only old or non-multiqueue devices have the PBA register
4469 	 * XXX Need special handling for 82575.
4470 	 */
4471 	if (((sc->sc_flags & WM_F_NEWQUEUE) == 0)
4472 	    || (sc->sc_type == WM_T_82575))
4473 		CSR_WRITE(sc, WMREG_PBA, sc->sc_pba);
4474 
4475 	/* Prevent the PCI-E bus from sticking */
4476 	if (sc->sc_flags & WM_F_PCIE) {
4477 		int timeout = 800;
4478 
4479 		sc->sc_ctrl |= CTRL_GIO_M_DIS;
4480 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
4481 
4482 		while (timeout--) {
4483 			if ((CSR_READ(sc, WMREG_STATUS) & STATUS_GIO_M_ENA)
4484 			    == 0)
4485 				break;
4486 			delay(100);
4487 		}
4488 		if (timeout == 0)
4489 			device_printf(sc->sc_dev,
4490 			    "failed to disable busmastering\n");
4491 	}
4492 
4493 	/* Set the completion timeout for interface */
4494 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
4495 	    || (sc->sc_type == WM_T_82580)
4496 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
4497 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211))
4498 		wm_set_pcie_completion_timeout(sc);
4499 
4500 	/* Clear interrupt */
4501 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4502 	if (wm_is_using_msix(sc)) {
4503 		if (sc->sc_type != WM_T_82574) {
4504 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4505 			CSR_WRITE(sc, WMREG_EIAC, 0);
4506 		} else {
4507 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4508 		}
4509 	}
4510 
4511 	/* Stop the transmit and receive processes. */
4512 	CSR_WRITE(sc, WMREG_RCTL, 0);
4513 	sc->sc_rctl &= ~RCTL_EN;
4514 	CSR_WRITE(sc, WMREG_TCTL, TCTL_PSP);
4515 	CSR_WRITE_FLUSH(sc);
4516 
4517 	/* XXX set_tbi_sbp_82543() */
4518 
4519 	delay(10*1000);
4520 
4521 	/* Must acquire the MDIO ownership before MAC reset */
4522 	switch (sc->sc_type) {
4523 	case WM_T_82573:
4524 	case WM_T_82574:
4525 	case WM_T_82583:
4526 		error = wm_get_hw_semaphore_82573(sc);
4527 		break;
4528 	default:
4529 		break;
4530 	}
4531 
4532 	/*
4533 	 * 82541 Errata 29? & 82547 Errata 28?
4534 	 * See also the description about PHY_RST bit in CTRL register
4535 	 * in 8254x_GBe_SDM.pdf.
4536 	 */
4537 	if ((sc->sc_type == WM_T_82541) || (sc->sc_type == WM_T_82547)) {
4538 		CSR_WRITE(sc, WMREG_CTRL,
4539 		    CSR_READ(sc, WMREG_CTRL) | CTRL_PHY_RESET);
4540 		CSR_WRITE_FLUSH(sc);
4541 		delay(5000);
4542 	}
4543 
4544 	switch (sc->sc_type) {
4545 	case WM_T_82544: /* XXX check whether WM_F_IOH_VALID is set */
4546 	case WM_T_82541:
4547 	case WM_T_82541_2:
4548 	case WM_T_82547:
4549 	case WM_T_82547_2:
4550 		/*
4551 		 * On some chipsets, a reset through a memory-mapped write
4552 		 * cycle can cause the chip to reset before completing the
4553 		 * write cycle. This causes major headache that can be avoided
4554 		 * by issuing the reset via indirect register writes through
4555 		 * I/O space.
4556 		 *
4557 		 * So, if we successfully mapped the I/O BAR at attach time,
4558 		 * use that. Otherwise, try our luck with a memory-mapped
4559 		 * reset.
4560 		 */
4561 		if (sc->sc_flags & WM_F_IOH_VALID)
4562 			wm_io_write(sc, WMREG_CTRL, CTRL_RST);
4563 		else
4564 			CSR_WRITE(sc, WMREG_CTRL, CTRL_RST);
4565 		break;
4566 	case WM_T_82545_3:
4567 	case WM_T_82546_3:
4568 		/* Use the shadow control register on these chips. */
4569 		CSR_WRITE(sc, WMREG_CTRL_SHADOW, CTRL_RST);
4570 		break;
4571 	case WM_T_80003:
4572 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4573 		sc->phy.acquire(sc);
4574 		CSR_WRITE(sc, WMREG_CTRL, reg);
4575 		sc->phy.release(sc);
4576 		break;
4577 	case WM_T_ICH8:
4578 	case WM_T_ICH9:
4579 	case WM_T_ICH10:
4580 	case WM_T_PCH:
4581 	case WM_T_PCH2:
4582 	case WM_T_PCH_LPT:
4583 	case WM_T_PCH_SPT:
4584 	case WM_T_PCH_CNP:
4585 		reg = CSR_READ(sc, WMREG_CTRL) | CTRL_RST;
4586 		if (wm_phy_resetisblocked(sc) == false) {
4587 			/*
4588 			 * Gate automatic PHY configuration by hardware on
4589 			 * non-managed 82579
4590 			 */
4591 			if ((sc->sc_type == WM_T_PCH2)
4592 			    && ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID)
4593 				== 0))
4594 				wm_gate_hw_phy_config_ich8lan(sc, true);
4595 
4596 			reg |= CTRL_PHY_RESET;
4597 			phy_reset = 1;
4598 		} else
4599 			printf("XXX reset is blocked!!!\n");
4600 		sc->phy.acquire(sc);
4601 		CSR_WRITE(sc, WMREG_CTRL, reg);
4602 		/* Don't insert a completion barrier when reset */
4603 		delay(20*1000);
4604 		mutex_exit(sc->sc_ich_phymtx);
4605 		break;
4606 	case WM_T_82580:
4607 	case WM_T_I350:
4608 	case WM_T_I354:
4609 	case WM_T_I210:
4610 	case WM_T_I211:
4611 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4612 		if (sc->sc_pcidevid != PCI_PRODUCT_INTEL_DH89XXCC_SGMII)
4613 			CSR_WRITE_FLUSH(sc);
4614 		delay(5000);
4615 		break;
4616 	case WM_T_82542_2_0:
4617 	case WM_T_82542_2_1:
4618 	case WM_T_82543:
4619 	case WM_T_82540:
4620 	case WM_T_82545:
4621 	case WM_T_82546:
4622 	case WM_T_82571:
4623 	case WM_T_82572:
4624 	case WM_T_82573:
4625 	case WM_T_82574:
4626 	case WM_T_82575:
4627 	case WM_T_82576:
4628 	case WM_T_82583:
4629 	default:
4630 		/* Everything else can safely use the documented method. */
4631 		CSR_WRITE(sc, WMREG_CTRL, CSR_READ(sc, WMREG_CTRL) | CTRL_RST);
4632 		break;
4633 	}
4634 
4635 	/* Must release the MDIO ownership after MAC reset */
4636 	switch (sc->sc_type) {
4637 	case WM_T_82573:
4638 	case WM_T_82574:
4639 	case WM_T_82583:
4640 		if (error == 0)
4641 			wm_put_hw_semaphore_82573(sc);
4642 		break;
4643 	default:
4644 		break;
4645 	}
4646 
4647 	if (phy_reset != 0)
4648 		wm_get_cfg_done(sc);
4649 
4650 	/* reload EEPROM */
4651 	switch (sc->sc_type) {
4652 	case WM_T_82542_2_0:
4653 	case WM_T_82542_2_1:
4654 	case WM_T_82543:
4655 	case WM_T_82544:
4656 		delay(10);
4657 		reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4658 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4659 		CSR_WRITE_FLUSH(sc);
4660 		delay(2000);
4661 		break;
4662 	case WM_T_82540:
4663 	case WM_T_82545:
4664 	case WM_T_82545_3:
4665 	case WM_T_82546:
4666 	case WM_T_82546_3:
4667 		delay(5*1000);
4668 		/* XXX Disable HW ARPs on ASF enabled adapters */
4669 		break;
4670 	case WM_T_82541:
4671 	case WM_T_82541_2:
4672 	case WM_T_82547:
4673 	case WM_T_82547_2:
4674 		delay(20000);
4675 		/* XXX Disable HW ARPs on ASF enabled adapters */
4676 		break;
4677 	case WM_T_82571:
4678 	case WM_T_82572:
4679 	case WM_T_82573:
4680 	case WM_T_82574:
4681 	case WM_T_82583:
4682 		if (sc->sc_flags & WM_F_EEPROM_FLASH) {
4683 			delay(10);
4684 			reg = CSR_READ(sc, WMREG_CTRL_EXT) | CTRL_EXT_EE_RST;
4685 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
4686 			CSR_WRITE_FLUSH(sc);
4687 		}
4688 		/* check EECD_EE_AUTORD */
4689 		wm_get_auto_rd_done(sc);
4690 		/*
4691 		 * Phy configuration from NVM just starts after EECD_AUTO_RD
4692 		 * is set.
4693 		 */
4694 		if ((sc->sc_type == WM_T_82573) || (sc->sc_type == WM_T_82574)
4695 		    || (sc->sc_type == WM_T_82583))
4696 			delay(25*1000);
4697 		break;
4698 	case WM_T_82575:
4699 	case WM_T_82576:
4700 	case WM_T_82580:
4701 	case WM_T_I350:
4702 	case WM_T_I354:
4703 	case WM_T_I210:
4704 	case WM_T_I211:
4705 	case WM_T_80003:
4706 		/* check EECD_EE_AUTORD */
4707 		wm_get_auto_rd_done(sc);
4708 		break;
4709 	case WM_T_ICH8:
4710 	case WM_T_ICH9:
4711 	case WM_T_ICH10:
4712 	case WM_T_PCH:
4713 	case WM_T_PCH2:
4714 	case WM_T_PCH_LPT:
4715 	case WM_T_PCH_SPT:
4716 	case WM_T_PCH_CNP:
4717 		break;
4718 	default:
4719 		panic("%s: unknown type\n", __func__);
4720 	}
4721 
4722 	/* Check whether EEPROM is present or not */
4723 	switch (sc->sc_type) {
4724 	case WM_T_82575:
4725 	case WM_T_82576:
4726 	case WM_T_82580:
4727 	case WM_T_I350:
4728 	case WM_T_I354:
4729 	case WM_T_ICH8:
4730 	case WM_T_ICH9:
4731 		if ((CSR_READ(sc, WMREG_EECD) & EECD_EE_PRES) == 0) {
4732 			/* Not found */
4733 			sc->sc_flags |= WM_F_EEPROM_INVALID;
4734 			if (sc->sc_type == WM_T_82575)
4735 				wm_reset_init_script_82575(sc);
4736 		}
4737 		break;
4738 	default:
4739 		break;
4740 	}
4741 
4742 	if (phy_reset != 0)
4743 		wm_phy_post_reset(sc);
4744 
4745 	if ((sc->sc_type == WM_T_82580)
4746 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)) {
4747 		/* clear global device reset status bit */
4748 		CSR_WRITE(sc, WMREG_STATUS, STATUS_DEV_RST_SET);
4749 	}
4750 
4751 	/* Clear any pending interrupt events. */
4752 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
4753 	reg = CSR_READ(sc, WMREG_ICR);
4754 	if (wm_is_using_msix(sc)) {
4755 		if (sc->sc_type != WM_T_82574) {
4756 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
4757 			CSR_WRITE(sc, WMREG_EIAC, 0);
4758 		} else
4759 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
4760 	}
4761 
4762 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
4763 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
4764 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
4765 	    || (sc->sc_type == WM_T_PCH_SPT) || (sc->sc_type == WM_T_PCH_CNP)){
4766 		reg = CSR_READ(sc, WMREG_KABGTXD);
4767 		reg |= KABGTXD_BGSQLBIAS;
4768 		CSR_WRITE(sc, WMREG_KABGTXD, reg);
4769 	}
4770 
4771 	/* reload sc_ctrl */
4772 	sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
4773 
4774 	if (sc->sc_type == WM_T_I354) {
4775 #if 0
4776 		/* I354 uses an external PHY */
4777 		wm_set_eee_i354(sc);
4778 #endif
4779 	} else if ((sc->sc_type >= WM_T_I350) && (sc->sc_type <= WM_T_I211))
4780 		wm_set_eee_i350(sc);
4781 
4782 	/*
4783 	 * For PCH, this write will make sure that any noise will be detected
4784 	 * as a CRC error and be dropped rather than show up as a bad packet
4785 	 * to the DMA engine
4786 	 */
4787 	if (sc->sc_type == WM_T_PCH)
4788 		CSR_WRITE(sc, WMREG_CRC_OFFSET, 0x65656565);
4789 
4790 	if (sc->sc_type >= WM_T_82544)
4791 		CSR_WRITE(sc, WMREG_WUC, 0);
4792 
4793 	wm_reset_mdicnfg_82580(sc);
4794 
4795 	if ((sc->sc_flags & WM_F_PLL_WA_I210) != 0)
4796 		wm_pll_workaround_i210(sc);
4797 
4798 	if (sc->sc_type == WM_T_80003) {
4799 		/* default to TRUE to enable the MDIC W/A */
4800 		sc->sc_flags |= WM_F_80003_MDIC_WA;
4801 
4802 		rv = wm_kmrn_readreg(sc,
4803 		    KUMCTRLSTA_OFFSET >> KUMCTRLSTA_OFFSET_SHIFT, &kmreg);
4804 		if (rv == 0) {
4805 			if ((kmreg & KUMCTRLSTA_OPMODE_MASK)
4806 			    == KUMCTRLSTA_OPMODE_INBAND_MDIO)
4807 				sc->sc_flags &= ~WM_F_80003_MDIC_WA;
4808 			else
4809 				sc->sc_flags |= WM_F_80003_MDIC_WA;
4810 		}
4811 	}
4812 }
4813 
4814 /*
4815  * wm_add_rxbuf:
4816  *
4817  *	Add a receive buffer to the indicated descriptor.
4818  */
4819 static int
4820 wm_add_rxbuf(struct wm_rxqueue *rxq, int idx)
4821 {
4822 	struct wm_softc *sc = rxq->rxq_sc;
4823 	struct wm_rxsoft *rxs = &rxq->rxq_soft[idx];
4824 	struct mbuf *m;
4825 	int error;
4826 
4827 	KASSERT(mutex_owned(rxq->rxq_lock));
4828 
4829 	MGETHDR(m, M_DONTWAIT, MT_DATA);
4830 	if (m == NULL)
4831 		return ENOBUFS;
4832 
4833 	MCLGET(m, M_DONTWAIT);
4834 	if ((m->m_flags & M_EXT) == 0) {
4835 		m_freem(m);
4836 		return ENOBUFS;
4837 	}
4838 
4839 	if (rxs->rxs_mbuf != NULL)
4840 		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4841 
4842 	rxs->rxs_mbuf = m;
4843 
4844 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
4845 	error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m,
4846 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
4847 	if (error) {
4848 		/* XXX XXX XXX */
4849 		aprint_error_dev(sc->sc_dev,
4850 		    "unable to load rx DMA map %d, error = %d\n", idx, error);
4851 		panic("wm_add_rxbuf");
4852 	}
4853 
4854 	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
4855 	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
4856 
4857 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
4858 		if ((sc->sc_rctl & RCTL_EN) != 0)
4859 			wm_init_rxdesc(rxq, idx);
4860 	} else
4861 		wm_init_rxdesc(rxq, idx);
4862 
4863 	return 0;
4864 }
4865 
4866 /*
4867  * wm_rxdrain:
4868  *
4869  *	Drain the receive queue.
4870  */
4871 static void
4872 wm_rxdrain(struct wm_rxqueue *rxq)
4873 {
4874 	struct wm_softc *sc = rxq->rxq_sc;
4875 	struct wm_rxsoft *rxs;
4876 	int i;
4877 
4878 	KASSERT(mutex_owned(rxq->rxq_lock));
4879 
4880 	for (i = 0; i < WM_NRXDESC; i++) {
4881 		rxs = &rxq->rxq_soft[i];
4882 		if (rxs->rxs_mbuf != NULL) {
4883 			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
4884 			m_freem(rxs->rxs_mbuf);
4885 			rxs->rxs_mbuf = NULL;
4886 		}
4887 	}
4888 }
4889 
4890 /*
4891  * Setup registers for RSS.
4892  *
4893  * XXX VMDq is not supported yet.
4894  */
4895 static void
4896 wm_init_rss(struct wm_softc *sc)
4897 {
4898 	uint32_t mrqc, reta_reg, rss_key[RSSRK_NUM_REGS];
4899 	int i;
4900 
4901 	CTASSERT(sizeof(rss_key) == RSS_KEYSIZE);
4902 
4903 	for (i = 0; i < RETA_NUM_ENTRIES; i++) {
4904 		int qid, reta_ent;
4905 
4906 		qid  = i % sc->sc_nqueues;
4907 		switch (sc->sc_type) {
4908 		case WM_T_82574:
4909 			reta_ent = __SHIFTIN(qid,
4910 			    RETA_ENT_QINDEX_MASK_82574);
4911 			break;
4912 		case WM_T_82575:
4913 			reta_ent = __SHIFTIN(qid,
4914 			    RETA_ENT_QINDEX1_MASK_82575);
4915 			break;
4916 		default:
4917 			reta_ent = __SHIFTIN(qid, RETA_ENT_QINDEX_MASK);
4918 			break;
4919 		}
4920 
4921 		reta_reg = CSR_READ(sc, WMREG_RETA_Q(i));
4922 		reta_reg &= ~RETA_ENTRY_MASK_Q(i);
4923 		reta_reg |= __SHIFTIN(reta_ent, RETA_ENTRY_MASK_Q(i));
4924 		CSR_WRITE(sc, WMREG_RETA_Q(i), reta_reg);
4925 	}
4926 
4927 	rss_getkey((uint8_t *)rss_key);
4928 	for (i = 0; i < RSSRK_NUM_REGS; i++)
4929 		CSR_WRITE(sc, WMREG_RSSRK(i), rss_key[i]);
4930 
4931 	if (sc->sc_type == WM_T_82574)
4932 		mrqc = MRQC_ENABLE_RSS_MQ_82574;
4933 	else
4934 		mrqc = MRQC_ENABLE_RSS_MQ;
4935 
4936 	/*
4937 	 * MRQC_RSS_FIELD_IPV6_EX is not set because of an errata.
4938 	 * See IPV6EXDIS bit in wm_initialize_hardware_bits().
4939 	 */
4940 	mrqc |= (MRQC_RSS_FIELD_IPV4 | MRQC_RSS_FIELD_IPV4_TCP);
4941 	mrqc |= (MRQC_RSS_FIELD_IPV6 | MRQC_RSS_FIELD_IPV6_TCP);
4942 	mrqc |= (MRQC_RSS_FIELD_IPV4_UDP | MRQC_RSS_FIELD_IPV6_UDP);
4943 	mrqc |= (MRQC_RSS_FIELD_IPV6_UDP_EX | MRQC_RSS_FIELD_IPV6_TCP_EX);
4944 
4945 	CSR_WRITE(sc, WMREG_MRQC, mrqc);
4946 }
4947 
4948 /*
4949  * Adjust the number of TX and RX queues the system actually uses.
4950  *
4951  * The result depends on the parameters below.
4952  *     - The number of hardware queues
4953  *     - The number of MSI-X vectors (= "nvectors" argument)
4954  *     - ncpu
4955  */
4956 static void
4957 wm_adjust_qnum(struct wm_softc *sc, int nvectors)
4958 {
4959 	int hw_ntxqueues, hw_nrxqueues, hw_nqueues;
4960 
4961 	if (nvectors < 2) {
4962 		sc->sc_nqueues = 1;
4963 		return;
4964 	}
4965 
4966 	switch (sc->sc_type) {
4967 	case WM_T_82572:
4968 		hw_ntxqueues = 2;
4969 		hw_nrxqueues = 2;
4970 		break;
4971 	case WM_T_82574:
4972 		hw_ntxqueues = 2;
4973 		hw_nrxqueues = 2;
4974 		break;
4975 	case WM_T_82575:
4976 		hw_ntxqueues = 4;
4977 		hw_nrxqueues = 4;
4978 		break;
4979 	case WM_T_82576:
4980 		hw_ntxqueues = 16;
4981 		hw_nrxqueues = 16;
4982 		break;
4983 	case WM_T_82580:
4984 	case WM_T_I350:
4985 	case WM_T_I354:
4986 		hw_ntxqueues = 8;
4987 		hw_nrxqueues = 8;
4988 		break;
4989 	case WM_T_I210:
4990 		hw_ntxqueues = 4;
4991 		hw_nrxqueues = 4;
4992 		break;
4993 	case WM_T_I211:
4994 		hw_ntxqueues = 2;
4995 		hw_nrxqueues = 2;
4996 		break;
4997 		/*
4998 		 * The Ethernet controllers below do not support MSI-X,
4999 		 * so this driver does not use multiqueue on them.
5000 		 *     - WM_T_80003
5001 		 *     - WM_T_ICH8
5002 		 *     - WM_T_ICH9
5003 		 *     - WM_T_ICH10
5004 		 *     - WM_T_PCH
5005 		 *     - WM_T_PCH2
5006 		 *     - WM_T_PCH_LPT
5007 		 */
5008 	default:
5009 		hw_ntxqueues = 1;
5010 		hw_nrxqueues = 1;
5011 		break;
5012 	}
5013 
5014 	hw_nqueues = min(hw_ntxqueues, hw_nrxqueues);
5015 
5016 	/*
5017 	 * Since more queues than MSI-X vectors cannot improve scaling, limit
5018 	 * the number of queues actually used.
5019 	 */
5020 	if (nvectors < hw_nqueues + 1)
5021 		sc->sc_nqueues = nvectors - 1;
5022 	else
5023 		sc->sc_nqueues = hw_nqueues;
5024 
5025 	/*
5026 	 * Since more queues than CPUs cannot improve scaling, limit
5027 	 * the number of queues actually used.
5028 	 */
5029 	if (ncpu < sc->sc_nqueues)
5030 		sc->sc_nqueues = ncpu;
5031 }
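
/*
 * Illustrative example: on an 82576 (16 hardware queue pairs) with
 * 5 MSI-X vectors and 8 CPUs, the vector limit gives 5 - 1 = 4 queues
 * (one vector is left for non-queue interrupts), and 4 <= ncpu, so
 * sc_nqueues becomes 4.
 */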
5032 
5033 static inline bool
5034 wm_is_using_msix(struct wm_softc *sc)
5035 {
5036 
5037 	return (sc->sc_nintrs > 1);
5038 }
5039 
5040 static inline bool
5041 wm_is_using_multiqueue(struct wm_softc *sc)
5042 {
5043 
5044 	return (sc->sc_nqueues > 1);
5045 }
5046 
5047 static int
5048 wm_softint_establish(struct wm_softc *sc, int qidx, int intr_idx)
5049 {
5050 	struct wm_queue *wmq = &sc->sc_queue[qidx];
5051 	wmq->wmq_id = qidx;
5052 	wmq->wmq_intr_idx = intr_idx;
5053 	wmq->wmq_si = softint_establish(SOFTINT_NET
5054 #ifdef WM_MPSAFE
5055 	    | SOFTINT_MPSAFE
5056 #endif
5057 	    , wm_handle_queue, wmq);
5058 	if (wmq->wmq_si != NULL)
5059 		return 0;
5060 
5061 	aprint_error_dev(sc->sc_dev, "unable to establish queue[%d] handler\n",
5062 	    wmq->wmq_id);
5063 
5064 	pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5065 	sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5066 	return ENOMEM;
5067 }
5068 
5069 /*
5070  * Both single-interrupt MSI and INTx can use this function.
5071  */
5072 static int
5073 wm_setup_legacy(struct wm_softc *sc)
5074 {
5075 	pci_chipset_tag_t pc = sc->sc_pc;
5076 	const char *intrstr = NULL;
5077 	char intrbuf[PCI_INTRSTR_LEN];
5078 	int error;
5079 
5080 	error = wm_alloc_txrx_queues(sc);
5081 	if (error) {
5082 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5083 		    error);
5084 		return ENOMEM;
5085 	}
5086 	intrstr = pci_intr_string(pc, sc->sc_intrs[0], intrbuf,
5087 	    sizeof(intrbuf));
5088 #ifdef WM_MPSAFE
5089 	pci_intr_setattr(pc, &sc->sc_intrs[0], PCI_INTR_MPSAFE, true);
5090 #endif
5091 	sc->sc_ihs[0] = pci_intr_establish_xname(pc, sc->sc_intrs[0],
5092 	    IPL_NET, wm_intr_legacy, sc, device_xname(sc->sc_dev));
5093 	if (sc->sc_ihs[0] == NULL) {
5094 		aprint_error_dev(sc->sc_dev, "unable to establish %s\n",
5095 		    (pci_intr_type(pc, sc->sc_intrs[0])
5096 			== PCI_INTR_TYPE_MSI) ? "MSI" : "INTx");
5097 		return ENOMEM;
5098 	}
5099 
5100 	aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
5101 	sc->sc_nintrs = 1;
5102 
5103 	return wm_softint_establish(sc, 0, 0);
5104 }
5105 
5106 static int
5107 wm_setup_msix(struct wm_softc *sc)
5108 {
5109 	void *vih;
5110 	kcpuset_t *affinity;
5111 	int qidx, error, intr_idx, txrx_established;
5112 	pci_chipset_tag_t pc = sc->sc_pc;
5113 	const char *intrstr = NULL;
5114 	char intrbuf[PCI_INTRSTR_LEN];
5115 	char intr_xname[INTRDEVNAMEBUF];
5116 
5117 	if (sc->sc_nqueues < ncpu) {
5118 		/*
5119 		 * To avoid other devices' interrupts, the affinity of Tx/Rx
5120 		 * interrupts starts from CPU#1.
5121 		 */
5122 		sc->sc_affinity_offset = 1;
5123 	} else {
5124 		/*
5125 		 * In this case, this device uses all CPUs, so we unify the
5126 		 * affinity cpu_index with the MSI-X vector number for readability.
5127 		 */
5128 		sc->sc_affinity_offset = 0;
5129 	}
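	/*
	 * For example (illustrative): with 2 queues on a 4-CPU machine,
	 * sc_affinity_offset = 1, so TXRX0 is bound to CPU#1 and TXRX1
	 * to CPU#2, leaving CPU#0 to other devices' interrupts.
	 */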
5130 
5131 	error = wm_alloc_txrx_queues(sc);
5132 	if (error) {
5133 		aprint_error_dev(sc->sc_dev, "cannot allocate queues %d\n",
5134 		    error);
5135 		return ENOMEM;
5136 	}
5137 
5138 	kcpuset_create(&affinity, false);
5139 	intr_idx = 0;
5140 
5141 	/*
5142 	 * TX and RX
5143 	 */
5144 	txrx_established = 0;
5145 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5146 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5147 		int affinity_to = (sc->sc_affinity_offset + intr_idx) % ncpu;
5148 
5149 		intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5150 		    sizeof(intrbuf));
5151 #ifdef WM_MPSAFE
5152 		pci_intr_setattr(pc, &sc->sc_intrs[intr_idx],
5153 		    PCI_INTR_MPSAFE, true);
5154 #endif
5155 		memset(intr_xname, 0, sizeof(intr_xname));
5156 		snprintf(intr_xname, sizeof(intr_xname), "%sTXRX%d",
5157 		    device_xname(sc->sc_dev), qidx);
5158 		vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5159 		    IPL_NET, wm_txrxintr_msix, wmq, intr_xname);
5160 		if (vih == NULL) {
5161 			aprint_error_dev(sc->sc_dev,
5162 			    "unable to establish MSI-X(for TX and RX)%s%s\n",
5163 			    intrstr ? " at " : "",
5164 			    intrstr ? intrstr : "");
5165 
5166 			goto fail;
5167 		}
5168 		kcpuset_zero(affinity);
5169 		/* Round-robin affinity */
5170 		kcpuset_set(affinity, affinity_to);
5171 		error = interrupt_distribute(vih, affinity, NULL);
5172 		if (error == 0) {
5173 			aprint_normal_dev(sc->sc_dev,
5174 			    "for TX and RX interrupting at %s affinity to %u\n",
5175 			    intrstr, affinity_to);
5176 		} else {
5177 			aprint_normal_dev(sc->sc_dev,
5178 			    "for TX and RX interrupting at %s\n", intrstr);
5179 		}
5180 		sc->sc_ihs[intr_idx] = vih;
5181 		if (wm_softint_establish(sc, qidx, intr_idx) != 0)
5182 			goto fail;
5183 		txrx_established++;
5184 		intr_idx++;
5185 	}
5186 
5187 	/*
5188 	 * LINK
5189 	 */
5190 	intrstr = pci_intr_string(pc, sc->sc_intrs[intr_idx], intrbuf,
5191 	    sizeof(intrbuf));
5192 #ifdef WM_MPSAFE
5193 	pci_intr_setattr(pc, &sc->sc_intrs[intr_idx], PCI_INTR_MPSAFE, true);
5194 #endif
5195 	memset(intr_xname, 0, sizeof(intr_xname));
5196 	snprintf(intr_xname, sizeof(intr_xname), "%sLINK",
5197 	    device_xname(sc->sc_dev));
5198 	vih = pci_intr_establish_xname(pc, sc->sc_intrs[intr_idx],
5199 	    IPL_NET, wm_linkintr_msix, sc, intr_xname);
5200 	if (vih == NULL) {
5201 		aprint_error_dev(sc->sc_dev,
5202 		    "unable to establish MSI-X(for LINK)%s%s\n",
5203 		    intrstr ? " at " : "",
5204 		    intrstr ? intrstr : "");
5205 
5206 		goto fail;
5207 	}
5208 	/* Keep the default affinity for the LINK interrupt. */
5209 	aprint_normal_dev(sc->sc_dev,
5210 	    "for LINK interrupting at %s\n", intrstr);
5211 	sc->sc_ihs[intr_idx] = vih;
5212 	sc->sc_link_intr_idx = intr_idx;
5213 
5214 	sc->sc_nintrs = sc->sc_nqueues + 1;
5215 	kcpuset_destroy(affinity);
5216 	return 0;
5217 
5218  fail:
5219 	for (qidx = 0; qidx < txrx_established; qidx++) {
5220 		struct wm_queue *wmq = &sc->sc_queue[qidx];
5221 		pci_intr_disestablish(sc->sc_pc, sc->sc_ihs[wmq->wmq_intr_idx]);
5222 		sc->sc_ihs[wmq->wmq_intr_idx] = NULL;
5223 	}
5224 
5225 	kcpuset_destroy(affinity);
5226 	return ENOMEM;
5227 }
5228 
5229 static void
5230 wm_unset_stopping_flags(struct wm_softc *sc)
5231 {
5232 	int i;
5233 
5234 	KASSERT(WM_CORE_LOCKED(sc));
5235 
5236 	/*
5237 	 * Must unset the stopping flags in ascending order.
5238 	 */
5239 	for (i = 0; i < sc->sc_nqueues; i++) {
5240 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5241 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5242 
5243 		mutex_enter(txq->txq_lock);
5244 		txq->txq_stopping = false;
5245 		mutex_exit(txq->txq_lock);
5246 
5247 		mutex_enter(rxq->rxq_lock);
5248 		rxq->rxq_stopping = false;
5249 		mutex_exit(rxq->rxq_lock);
5250 	}
5251 
5252 	sc->sc_core_stopping = false;
5253 }
5254 
5255 static void
5256 wm_set_stopping_flags(struct wm_softc *sc)
5257 {
5258 	int i;
5259 
5260 	KASSERT(WM_CORE_LOCKED(sc));
5261 
5262 	sc->sc_core_stopping = true;
5263 
5264 	/*
5265 	 * Must set the stopping flags in ascending order.
5266 	 */
5267 	for (i = 0; i < sc->sc_nqueues; i++) {
5268 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
5269 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
5270 
5271 		mutex_enter(rxq->rxq_lock);
5272 		rxq->rxq_stopping = true;
5273 		mutex_exit(rxq->rxq_lock);
5274 
5275 		mutex_enter(txq->txq_lock);
5276 		txq->txq_stopping = true;
5277 		mutex_exit(txq->txq_lock);
5278 	}
5279 }
5280 
5281 /*
5282  * Write the interrupt interval value to the ITR or EITR register.
5283  */
5284 static void
5285 wm_itrs_writereg(struct wm_softc *sc, struct wm_queue *wmq)
5286 {
5287 
5288 	if (!wmq->wmq_set_itr)
5289 		return;
5290 
5291 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5292 		uint32_t eitr = __SHIFTIN(wmq->wmq_itr, EITR_ITR_INT_MASK);
5293 
5294 		/*
5295 		 * The 82575 doesn't have the CNT_INGR field,
5296 		 * so overwrite the counter field in software.
5297 		 */
5298 		if (sc->sc_type == WM_T_82575)
5299 			eitr |= __SHIFTIN(wmq->wmq_itr, EITR_COUNTER_MASK_82575);
5300 		else
5301 			eitr |= EITR_CNT_INGR;
5302 
5303 		CSR_WRITE(sc, WMREG_EITR(wmq->wmq_intr_idx), eitr);
5304 	} else if (sc->sc_type == WM_T_82574 && wm_is_using_msix(sc)) {
5305 		/*
5306 		 * The 82574 has both ITR and EITR. Set EITR when we use
5307 		 * the multiqueue function with MSI-X.
5308 		 */
5309 		CSR_WRITE(sc, WMREG_EITR_82574(wmq->wmq_intr_idx),
5310 		    wmq->wmq_itr & EITR_ITR_INT_MASK_82574);
5311 	} else {
5312 		KASSERT(wmq->wmq_id == 0);
5313 		CSR_WRITE(sc, WMREG_ITR, wmq->wmq_itr);
5314 	}
5315 
5316 	wmq->wmq_set_itr = false;
5317 }
5318 
5319 /*
5320  * TODO
5321  * The dynamic ITR calculation below is almost the same as Linux igb's;
5322  * however, it does not fit wm(4), so AIM remains disabled until we
5323  * find an appropriate ITR calculation.
5324  */
5325 /*
5326  * Calculate the interrupt interval value that wm_itrs_writereg() will
5327  * write to the register. This function does not write ITR/EITR itself.
5328  */
5329 static void
5330 wm_itrs_calculate(struct wm_softc *sc, struct wm_queue *wmq)
5331 {
5332 #ifdef NOTYET
5333 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
5334 	struct wm_txqueue *txq = &wmq->wmq_txq;
5335 	uint32_t avg_size = 0;
5336 	uint32_t new_itr;
5337 
5338 	if (rxq->rxq_packets)
5339 		avg_size =  rxq->rxq_bytes / rxq->rxq_packets;
5340 	if (txq->txq_packets)
5341 		avg_size = max(avg_size, txq->txq_bytes / txq->txq_packets);
5342 
5343 	if (avg_size == 0) {
5344 		new_itr = 450; /* restore default value */
5345 		goto out;
5346 	}
5347 
5348 	/* Add 24 bytes to size to account for CRC, preamble, and gap */
5349 	avg_size += 24;
5350 
5351 	/* Don't starve jumbo frames */
5352 	avg_size = min(avg_size, 3000);
5353 
5354 	/* Give a little boost to mid-size frames */
5355 	if ((avg_size > 300) && (avg_size < 1200))
5356 		new_itr = avg_size / 3;
5357 	else
5358 		new_itr = avg_size / 2;
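	/*
	 * Illustrative walk-through: 576-byte average frames become 600
	 * after the 24-byte adjustment, take the mid-size branch, and
	 * yield new_itr = 600 / 3 = 200, which is then scaled by 4 below
	 * on most controllers.
	 */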
5359 
5360 out:
5361 	/*
5362 	 * The usage of 82574 and 82575 EITR is different from other NEWQUEUE
5363 	 * controllers. See sc->sc_itr_init setting in wm_init_locked().
5364 	 */
5365 	if ((sc->sc_flags & WM_F_NEWQUEUE) == 0 || sc->sc_type != WM_T_82575)
5366 		new_itr *= 4;
5367 
5368 	if (new_itr != wmq->wmq_itr) {
5369 		wmq->wmq_itr = new_itr;
5370 		wmq->wmq_set_itr = true;
5371 	} else
5372 		wmq->wmq_set_itr = false;
5373 
5374 	rxq->rxq_packets = 0;
5375 	rxq->rxq_bytes = 0;
5376 	txq->txq_packets = 0;
5377 	txq->txq_bytes = 0;
5378 #endif
5379 }
5380 
5381 /*
5382  * wm_init:		[ifnet interface function]
5383  *
5384  *	Initialize the interface.
5385  */
5386 static int
5387 wm_init(struct ifnet *ifp)
5388 {
5389 	struct wm_softc *sc = ifp->if_softc;
5390 	int ret;
5391 
5392 	WM_CORE_LOCK(sc);
5393 	ret = wm_init_locked(ifp);
5394 	WM_CORE_UNLOCK(sc);
5395 
5396 	return ret;
5397 }
5398 
5399 static int
5400 wm_init_locked(struct ifnet *ifp)
5401 {
5402 	struct wm_softc *sc = ifp->if_softc;
5403 	int i, j, trynum, error = 0;
5404 	uint32_t reg;
5405 
5406 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5407 		device_xname(sc->sc_dev), __func__));
5408 	KASSERT(WM_CORE_LOCKED(sc));
5409 
5410 	/*
5411 	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
5412 	 * There is a small but measurable benefit to avoiding the adjustment
5413 	 * of the descriptor so that the headers are aligned, for normal MTU,
5414 	 * on such platforms.  One possibility is that the DMA itself is
5415 	 * slightly more efficient if the front of the entire packet (instead
5416 	 * of the front of the headers) is aligned.
5417 	 *
5418 	 * Note we must always set align_tweak to 0 if we are using
5419 	 * jumbo frames.
5420 	 */
5421 #ifdef __NO_STRICT_ALIGNMENT
5422 	sc->sc_align_tweak = 0;
5423 #else
5424 	if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2))
5425 		sc->sc_align_tweak = 0;
5426 	else
5427 		sc->sc_align_tweak = 2;
5428 #endif /* __NO_STRICT_ALIGNMENT */
5429 
5430 	/* Cancel any pending I/O. */
5431 	wm_stop_locked(ifp, 0);
5432 
5433 	/* update statistics before reset */
5434 	ifp->if_collisions += CSR_READ(sc, WMREG_COLC);
5435 	ifp->if_ierrors += CSR_READ(sc, WMREG_RXERRC);
5436 
5437 	/* PCH_SPT hardware workaround */
5438 	if (sc->sc_type == WM_T_PCH_SPT)
5439 		wm_flush_desc_rings(sc);
5440 
5441 	/* Reset the chip to a known state. */
5442 	wm_reset(sc);
5443 
5444 	/*
5445 	 * AMT-based hardware can now take control from firmware.
5446 	 * Do this after reset.
5447 	 */
5448 	if ((sc->sc_flags & WM_F_HAS_AMT) != 0)
5449 		wm_get_hw_control(sc);
5450 
5451 	if ((sc->sc_type >= WM_T_PCH_SPT) &&
5452 	    pci_intr_type(sc->sc_pc, sc->sc_intrs[0]) == PCI_INTR_TYPE_INTX)
5453 		wm_legacy_irq_quirk_spt(sc);
5454 
5455 	/* Init hardware bits */
5456 	wm_initialize_hardware_bits(sc);
5457 
5458 	/* Reset the PHY. */
5459 	if (sc->sc_flags & WM_F_HAS_MII)
5460 		wm_gmii_reset(sc);
5461 
5462 	/* Calculate (E)ITR value */
5463 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0 && sc->sc_type != WM_T_82575) {
5464 		/*
5465 		 * For NEWQUEUE's EITR (except for the 82575).
5466 		 * The 82575's EITR should be set to the same throttling value
5467 		 * as other old controllers' ITR because the interrupts/sec
5468 		 * calculation is the same, that is, 1,000,000,000 / (N * 256).
5469 		 *
5470 		 * The 82574's EITR should be set to the same value as ITR.
5471 		 *
5472 		 * For N interrupts/sec, set this value to:
5473 		 * 1,000,000 / N, in contrast to the ITR throttling value.
5474 		 */
5475 		sc->sc_itr_init = 450;
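		/*
		 * With the formula above, 450 corresponds to about
		 * 1,000,000 / 450 ~= 2222 interrupts/sec.
		 */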
5476 	} else if (sc->sc_type >= WM_T_82543) {
5477 		/*
5478 		 * Set up the interrupt throttling register (units of 256ns)
5479 		 * Note that a footnote in Intel's documentation says this
5480 		 * ticker runs at 1/4 the rate when the chip is in 100Mbit
5481 		 * or 10Mbit mode.  Empirically, it appears to be the case
5482 		 * that this is also true for the 1024ns units of the other
5483 		 * interrupt-related timer registers -- so, really, we ought
5484 		 * to divide this value by 4 when the link speed is low.
5485 		 *
5486 		 * XXX implement this division at link speed change!
5487 		 */
5488 
5489 		/*
5490 		 * For N interrupts/sec, set this value to:
5491 		 * 1,000,000,000 / (N * 256).  Note that we set the
5492 		 * absolute and packet timer values to this value
5493 		 * divided by 4 to get "simple timer" behavior.
5494 		 */
5495 		sc->sc_itr_init = 1500;		/* 2604 ints/sec */
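		/*
		 * As a cross-check, 1,000,000,000 / (1500 * 256) is about
		 * 2604, matching the annotation above.
		 */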
5496 	}
5497 
5498 	error = wm_init_txrx_queues(sc);
5499 	if (error)
5500 		goto out;
5501 
5502 	/*
5503 	 * Clear out the VLAN table -- we don't use it (yet).
5504 	 */
5505 	CSR_WRITE(sc, WMREG_VET, 0);
5506 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354))
5507 		trynum = 10; /* Due to hw errata */
5508 	else
5509 		trynum = 1;
5510 	for (i = 0; i < WM_VLAN_TABSIZE; i++)
5511 		for (j = 0; j < trynum; j++)
5512 			CSR_WRITE(sc, WMREG_VFTA + (i << 2), 0);
5513 
5514 	/*
5515 	 * Set up flow-control parameters.
5516 	 *
5517 	 * XXX Values could probably stand some tuning.
5518 	 */
5519 	if ((sc->sc_type != WM_T_ICH8) && (sc->sc_type != WM_T_ICH9)
5520 	    && (sc->sc_type != WM_T_ICH10) && (sc->sc_type != WM_T_PCH)
5521 	    && (sc->sc_type != WM_T_PCH2) && (sc->sc_type != WM_T_PCH_LPT)
5522 	    && (sc->sc_type != WM_T_PCH_SPT) && (sc->sc_type != WM_T_PCH_CNP)){
5523 		CSR_WRITE(sc, WMREG_FCAL, FCAL_CONST);
5524 		CSR_WRITE(sc, WMREG_FCAH, FCAH_CONST);
5525 		CSR_WRITE(sc, WMREG_FCT, ETHERTYPE_FLOWCONTROL);
5526 	}
5527 
5528 	sc->sc_fcrtl = FCRTL_DFLT;
5529 	if (sc->sc_type < WM_T_82543) {
5530 		CSR_WRITE(sc, WMREG_OLD_FCRTH, FCRTH_DFLT);
5531 		CSR_WRITE(sc, WMREG_OLD_FCRTL, sc->sc_fcrtl);
5532 	} else {
5533 		CSR_WRITE(sc, WMREG_FCRTH, FCRTH_DFLT);
5534 		CSR_WRITE(sc, WMREG_FCRTL, sc->sc_fcrtl);
5535 	}
5536 
5537 	if (sc->sc_type == WM_T_80003)
5538 		CSR_WRITE(sc, WMREG_FCTTV, 0xffff);
5539 	else
5540 		CSR_WRITE(sc, WMREG_FCTTV, FCTTV_DFLT);
5541 
5542 	/* Writes the control register. */
5543 	wm_set_vlan(sc);
5544 
5545 	if (sc->sc_flags & WM_F_HAS_MII) {
5546 		uint16_t kmreg;
5547 
5548 		switch (sc->sc_type) {
5549 		case WM_T_80003:
5550 		case WM_T_ICH8:
5551 		case WM_T_ICH9:
5552 		case WM_T_ICH10:
5553 		case WM_T_PCH:
5554 		case WM_T_PCH2:
5555 		case WM_T_PCH_LPT:
5556 		case WM_T_PCH_SPT:
5557 		case WM_T_PCH_CNP:
5558 			/*
5559 			 * Set the MAC to wait the maximum time between each
5560 			 * iteration and increase the max iterations when
5561 			 * polling the PHY; this fixes erroneous timeouts at
5562 			 * 10Mbps.
5563 			 */
5564 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_TIMEOUTS,
5565 			    0xFFFF);
5566 			wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5567 			    &kmreg);
5568 			kmreg |= 0x3F;
5569 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_PARAM,
5570 			    kmreg);
5571 			break;
5572 		default:
5573 			break;
5574 		}
5575 
5576 		if (sc->sc_type == WM_T_80003) {
5577 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5578 			reg &= ~CTRL_EXT_LINK_MODE_MASK;
5579 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5580 
5581 			/* Bypass RX and TX FIFO's */
5582 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_FIFO_CTRL,
5583 			    KUMCTRLSTA_FIFO_CTRL_RX_BYPASS
5584 			    | KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
5585 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_INB_CTRL,
5586 			    KUMCTRLSTA_INB_CTRL_DIS_PADDING |
5587 			    KUMCTRLSTA_INB_CTRL_LINK_TMOUT_DFLT);
5588 		}
5589 	}
5590 #if 0
5591 	CSR_WRITE(sc, WMREG_CTRL_EXT, sc->sc_ctrl_ext);
5592 #endif
5593 
5594 	/* Set up checksum offload parameters. */
5595 	reg = CSR_READ(sc, WMREG_RXCSUM);
5596 	reg &= ~(RXCSUM_IPOFL | RXCSUM_IPV6OFL | RXCSUM_TUOFL);
5597 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
5598 		reg |= RXCSUM_IPOFL;
5599 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx))
5600 		reg |= RXCSUM_IPOFL | RXCSUM_TUOFL;
5601 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
5602 		reg |= RXCSUM_IPV6OFL | RXCSUM_TUOFL;
5603 	CSR_WRITE(sc, WMREG_RXCSUM, reg);
5604 
5605 	/* Set registers about MSI-X */
5606 	if (wm_is_using_msix(sc)) {
5607 		uint32_t ivar;
5608 		struct wm_queue *wmq;
5609 		int qid, qintr_idx;
5610 
5611 		if (sc->sc_type == WM_T_82575) {
5612 			/* Interrupt control */
5613 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5614 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME | CTRL_EXT_NSICR;
5615 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5616 
5617 			/* TX and RX */
5618 			for (i = 0; i < sc->sc_nqueues; i++) {
5619 				wmq = &sc->sc_queue[i];
5620 				CSR_WRITE(sc, WMREG_MSIXBM(wmq->wmq_intr_idx),
5621 				    EITR_TX_QUEUE(wmq->wmq_id)
5622 				    | EITR_RX_QUEUE(wmq->wmq_id));
5623 			}
5624 			/* Link status */
5625 			CSR_WRITE(sc, WMREG_MSIXBM(sc->sc_link_intr_idx),
5626 			    EITR_OTHER);
5627 		} else if (sc->sc_type == WM_T_82574) {
5628 			/* Interrupt control */
5629 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
5630 			reg |= CTRL_EXT_PBA | CTRL_EXT_EIAME;
5631 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
5632 
5633 			/*
5634 			 * Work around an issue with spurious interrupts
5635 			 * in MSI-X mode.
5636 			 * At wm_initialize_hardware_bits(), sc_nintrs has not
5637 			 * been initialized yet, so re-initialize WMREG_RFCTL here.
5638 			 */
5639 			reg = CSR_READ(sc, WMREG_RFCTL);
5640 			reg |= WMREG_RFCTL_ACKDIS;
5641 			CSR_WRITE(sc, WMREG_RFCTL, reg);
5642 
5643 			ivar = 0;
5644 			/* TX and RX */
5645 			for (i = 0; i < sc->sc_nqueues; i++) {
5646 				wmq = &sc->sc_queue[i];
5647 				qid = wmq->wmq_id;
5648 				qintr_idx = wmq->wmq_intr_idx;
5649 
5650 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5651 				    IVAR_TX_MASK_Q_82574(qid));
5652 				ivar |= __SHIFTIN((IVAR_VALID_82574|qintr_idx),
5653 				    IVAR_RX_MASK_Q_82574(qid));
5654 			}
5655 			/* Link status */
5656 			ivar |= __SHIFTIN((IVAR_VALID_82574
5657 				| sc->sc_link_intr_idx), IVAR_OTHER_MASK);
5658 			CSR_WRITE(sc, WMREG_IVAR, ivar | IVAR_INT_ON_ALL_WB);
5659 		} else {
5660 			/* Interrupt control */
5661 			CSR_WRITE(sc, WMREG_GPIE, GPIE_NSICR | GPIE_MULTI_MSIX
5662 			    | GPIE_EIAME | GPIE_PBA);
5663 
5664 			switch (sc->sc_type) {
5665 			case WM_T_82580:
5666 			case WM_T_I350:
5667 			case WM_T_I354:
5668 			case WM_T_I210:
5669 			case WM_T_I211:
5670 				/* TX and RX */
5671 				for (i = 0; i < sc->sc_nqueues; i++) {
5672 					wmq = &sc->sc_queue[i];
5673 					qid = wmq->wmq_id;
5674 					qintr_idx = wmq->wmq_intr_idx;
5675 
5676 					ivar = CSR_READ(sc, WMREG_IVAR_Q(qid));
5677 					ivar &= ~IVAR_TX_MASK_Q(qid);
5678 					ivar |= __SHIFTIN((qintr_idx
5679 						| IVAR_VALID),
5680 					    IVAR_TX_MASK_Q(qid));
5681 					ivar &= ~IVAR_RX_MASK_Q(qid);
5682 					ivar |= __SHIFTIN((qintr_idx
5683 						| IVAR_VALID),
5684 					    IVAR_RX_MASK_Q(qid));
5685 					CSR_WRITE(sc, WMREG_IVAR_Q(qid), ivar);
5686 				}
5687 				break;
5688 			case WM_T_82576:
5689 				/* TX and RX */
5690 				for (i = 0; i < sc->sc_nqueues; i++) {
5691 					wmq = &sc->sc_queue[i];
5692 					qid = wmq->wmq_id;
5693 					qintr_idx = wmq->wmq_intr_idx;
5694 
5695 					ivar = CSR_READ(sc,
5696 					    WMREG_IVAR_Q_82576(qid));
5697 					ivar &= ~IVAR_TX_MASK_Q_82576(qid);
5698 					ivar |= __SHIFTIN((qintr_idx
5699 						| IVAR_VALID),
5700 					    IVAR_TX_MASK_Q_82576(qid));
5701 					ivar &= ~IVAR_RX_MASK_Q_82576(qid);
5702 					ivar |= __SHIFTIN((qintr_idx
5703 						| IVAR_VALID),
5704 					    IVAR_RX_MASK_Q_82576(qid));
5705 					CSR_WRITE(sc, WMREG_IVAR_Q_82576(qid),
5706 					    ivar);
5707 				}
5708 				break;
5709 			default:
5710 				break;
5711 			}
5712 
5713 			/* Link status */
5714 			ivar = __SHIFTIN((sc->sc_link_intr_idx | IVAR_VALID),
5715 			    IVAR_MISC_OTHER);
5716 			CSR_WRITE(sc, WMREG_IVAR_MISC, ivar);
5717 		}
5718 
5719 		if (wm_is_using_multiqueue(sc)) {
5720 			wm_init_rss(sc);
5721 
5722 			/*
5723 			 * NOTE: Receive Full-Packet Checksum Offload
5724 			 * is mutually exclusive with Multiqueue. However,
5725 			 * this is not the same as TCP/IP checksums, which
5726 			 * still work.
5727 			 */
5728 			reg = CSR_READ(sc, WMREG_RXCSUM);
5729 			reg |= RXCSUM_PCSD;
5730 			CSR_WRITE(sc, WMREG_RXCSUM, reg);
5731 		}
5732 	}
5733 
5734 	/* Set up the interrupt registers. */
5735 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
5736 	sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 |
5737 	    ICR_RXO | ICR_RXT0;
5738 	if (wm_is_using_msix(sc)) {
5739 		uint32_t mask;
5740 		struct wm_queue *wmq;
5741 
5742 		switch (sc->sc_type) {
5743 		case WM_T_82574:
5744 			mask = 0;
5745 			for (i = 0; i < sc->sc_nqueues; i++) {
5746 				wmq = &sc->sc_queue[i];
5747 				mask |= ICR_TXQ(wmq->wmq_id);
5748 				mask |= ICR_RXQ(wmq->wmq_id);
5749 			}
5750 			mask |= ICR_OTHER;
5751 			CSR_WRITE(sc, WMREG_EIAC_82574, mask);
5752 			CSR_WRITE(sc, WMREG_IMS, mask | ICR_LSC);
5753 			break;
5754 		default:
5755 			if (sc->sc_type == WM_T_82575) {
5756 				mask = 0;
5757 				for (i = 0; i < sc->sc_nqueues; i++) {
5758 					wmq = &sc->sc_queue[i];
5759 					mask |= EITR_TX_QUEUE(wmq->wmq_id);
5760 					mask |= EITR_RX_QUEUE(wmq->wmq_id);
5761 				}
5762 				mask |= EITR_OTHER;
5763 			} else {
5764 				mask = 0;
5765 				for (i = 0; i < sc->sc_nqueues; i++) {
5766 					wmq = &sc->sc_queue[i];
5767 					mask |= 1 << wmq->wmq_intr_idx;
5768 				}
5769 				mask |= 1 << sc->sc_link_intr_idx;
5770 			}
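			/*
			 * Our reading of the 82576-class datasheets: EIAC
			 * selects which extended interrupt causes auto-clear
			 * when their MSI-X vector fires, EIAM which ones
			 * auto-mask, and EIMS enables them.
			 */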
5771 			CSR_WRITE(sc, WMREG_EIAC, mask);
5772 			CSR_WRITE(sc, WMREG_EIAM, mask);
5773 			CSR_WRITE(sc, WMREG_EIMS, mask);
5774 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
5775 			break;
5776 		}
5777 	} else
5778 		CSR_WRITE(sc, WMREG_IMS, sc->sc_icr);
5779 
5780 	/* Set up the inter-packet gap. */
5781 	CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
5782 
5783 	if (sc->sc_type >= WM_T_82543) {
5784 		for (int qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5785 			struct wm_queue *wmq = &sc->sc_queue[qidx];
5786 			wm_itrs_writereg(sc, wmq);
5787 		}
5788 		/*
5789 		 * Link interrupts occur much less frequently than TX
5790 		 * and RX interrupts, so we don't tune the
5791 		 * EITR(WM_MSIX_LINKINTR_IDX) value the way
5792 		 * FreeBSD's if_igb does.
5793 		 */
5794 	}
5795 
5796 	/* Set the VLAN ethernetype. */
5797 	CSR_WRITE(sc, WMREG_VET, ETHERTYPE_VLAN);
5798 
5799 	/*
5800 	 * Set up the transmit control register; we start out with
5801 	 * a collision distance suitable for FDX, but update it when
5802 	 * we resolve the media type.
5803 	 */
5804 	sc->sc_tctl = TCTL_EN | TCTL_PSP | TCTL_RTLC
5805 	    | TCTL_CT(TX_COLLISION_THRESHOLD)
5806 	    | TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
5807 	if (sc->sc_type >= WM_T_82571)
5808 		sc->sc_tctl |= TCTL_MULR;
5809 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
5810 
5811 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5812 		/* Write TDT after TCTL.EN is set. See the documentation. */
5813 		CSR_WRITE(sc, WMREG_TDT(0), 0);
5814 	}
5815 
5816 	if (sc->sc_type == WM_T_80003) {
5817 		reg = CSR_READ(sc, WMREG_TCTL_EXT);
5818 		reg &= ~TCTL_EXT_GCEX_MASK;
5819 		reg |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
5820 		CSR_WRITE(sc, WMREG_TCTL_EXT, reg);
5821 	}
5822 
5823 	/* Set the media. */
5824 	if ((error = mii_ifmedia_change(&sc->sc_mii)) != 0)
5825 		goto out;
5826 
5827 	/* Configure for OS presence */
5828 	wm_init_manageability(sc);
5829 
5830 	/*
5831 	 * Set up the receive control register; we actually program the
5832 	 * register when we set the receive filter. Use multicast address
5833 	 * offset type 0.
5834 	 *
5835 	 * Only the i82544 has the ability to strip the incoming CRC, so we
5836 	 * don't enable that feature.
5837 	 */
5838 	sc->sc_mchash_type = 0;
5839 	sc->sc_rctl = RCTL_EN | RCTL_LBM_NONE | RCTL_RDMTS_1_2 | RCTL_DPF
5840 	    | RCTL_MO(sc->sc_mchash_type);
5841 
5842 	/*
5843 	 * The 82574 uses the one-buffer extended Rx descriptor format.
5844 	 */
5845 	if (sc->sc_type == WM_T_82574)
5846 		sc->sc_rctl |= RCTL_DTYP_ONEBUF;
5847 
5848 	/*
5849 	 * The I350 has a bug where it always strips the CRC whether
5850 	 * asked to or not. So ask for stripped CRC here and cope in rxeof().
5851 	 */
5852 	if ((sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
5853 	    || (sc->sc_type == WM_T_I210))
5854 		sc->sc_rctl |= RCTL_SECRC;
5855 
5856 	if (((sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) != 0)
5857 	    && (ifp->if_mtu > ETHERMTU)) {
5858 		sc->sc_rctl |= RCTL_LPE;
5859 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
5860 			CSR_WRITE(sc, WMREG_RLPML, ETHER_MAX_LEN_JUMBO);
5861 	}
5862 
5863 	if (MCLBYTES == 2048) {
5864 		sc->sc_rctl |= RCTL_2k;
5865 	} else {
5866 		if (sc->sc_type >= WM_T_82543) {
5867 			switch (MCLBYTES) {
5868 			case 4096:
5869 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_4k;
5870 				break;
5871 			case 8192:
5872 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_8k;
5873 				break;
5874 			case 16384:
5875 				sc->sc_rctl |= RCTL_BSEX | RCTL_BSEX_16k;
5876 				break;
5877 			default:
5878 				panic("wm_init: MCLBYTES %d unsupported",
5879 				    MCLBYTES);
5880 				break;
5881 			}
5882 		} else panic("wm_init: i82542 requires MCLBYTES = 2048");
5883 	}
5884 
5885 	/* Enable ECC */
5886 	switch (sc->sc_type) {
5887 	case WM_T_82571:
5888 		reg = CSR_READ(sc, WMREG_PBA_ECC);
5889 		reg |= PBA_ECC_CORR_EN;
5890 		CSR_WRITE(sc, WMREG_PBA_ECC, reg);
5891 		break;
5892 	case WM_T_PCH_LPT:
5893 	case WM_T_PCH_SPT:
5894 	case WM_T_PCH_CNP:
5895 		reg = CSR_READ(sc, WMREG_PBECCSTS);
5896 		reg |= PBECCSTS_UNCORR_ECC_ENABLE;
5897 		CSR_WRITE(sc, WMREG_PBECCSTS, reg);
5898 
5899 		sc->sc_ctrl |= CTRL_MEHE;
5900 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
5901 		break;
5902 	default:
5903 		break;
5904 	}
5905 
5906 	/*
5907 	 * Set the receive filter.
5908 	 *
5909 	 * For 82575 and 82576, the RX descriptors must be initialized after
5910 	 * the setting of RCTL.EN in wm_set_filter()
5911 	 */
5912 	wm_set_filter(sc);
5913 
5914 	/* On 575 and later set RDT only if RX enabled */
5915 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
5916 		int qidx;
5917 		for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
5918 			struct wm_rxqueue *rxq = &sc->sc_queue[qidx].wmq_rxq;
5919 			for (i = 0; i < WM_NRXDESC; i++) {
5920 				mutex_enter(rxq->rxq_lock);
5921 				wm_init_rxdesc(rxq, i);
5922 				mutex_exit(rxq->rxq_lock);
5923 
5924 			}
5925 		}
5926 	}
5927 
5928 	wm_unset_stopping_flags(sc);
5929 
5930 	/* Start the one second link check clock. */
5931 	callout_reset(&sc->sc_tick_ch, hz, wm_tick, sc);
5932 
5933 	/* ...all done! */
5934 	ifp->if_flags |= IFF_RUNNING;
5935 	ifp->if_flags &= ~IFF_OACTIVE;
5936 
5937  out:
5938 	sc->sc_if_flags = ifp->if_flags;
5939 	if (error)
5940 		log(LOG_ERR, "%s: interface not running\n",
5941 		    device_xname(sc->sc_dev));
5942 	return error;
5943 }
5944 
5945 /*
5946  * wm_stop:		[ifnet interface function]
5947  *
5948  *	Stop transmission on the interface.
5949  */
5950 static void
5951 wm_stop(struct ifnet *ifp, int disable)
5952 {
5953 	struct wm_softc *sc = ifp->if_softc;
5954 
5955 	WM_CORE_LOCK(sc);
5956 	wm_stop_locked(ifp, disable);
5957 	WM_CORE_UNLOCK(sc);
5958 }
5959 
5960 static void
5961 wm_stop_locked(struct ifnet *ifp, int disable)
5962 {
5963 	struct wm_softc *sc = ifp->if_softc;
5964 	struct wm_txsoft *txs;
5965 	int i, qidx;
5966 
5967 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
5968 		device_xname(sc->sc_dev), __func__));
5969 	KASSERT(WM_CORE_LOCKED(sc));
5970 
5971 	wm_set_stopping_flags(sc);
5972 
5973 	/* Stop the one second clock. */
5974 	callout_stop(&sc->sc_tick_ch);
5975 
5976 	/* Stop the 82547 Tx FIFO stall check timer. */
5977 	if (sc->sc_type == WM_T_82547)
5978 		callout_stop(&sc->sc_txfifo_ch);
5979 
5980 	if (sc->sc_flags & WM_F_HAS_MII) {
5981 		/* Down the MII. */
5982 		mii_down(&sc->sc_mii);
5983 	} else {
5984 #if 0
5985 		/* Should we clear PHY's status properly? */
5986 		wm_reset(sc);
5987 #endif
5988 	}
5989 
5990 	/* Stop the transmit and receive processes. */
5991 	CSR_WRITE(sc, WMREG_TCTL, 0);
5992 	CSR_WRITE(sc, WMREG_RCTL, 0);
5993 	sc->sc_rctl &= ~RCTL_EN;
5994 
5995 	/*
5996 	 * Clear the interrupt mask to ensure the device cannot assert its
5997 	 * interrupt line.
5998 	 * Clear sc->sc_icr to ensure wm_intr_legacy() makes no attempt to
5999 	 * service any currently pending or shared interrupt.
6000 	 */
6001 	CSR_WRITE(sc, WMREG_IMC, 0xffffffffU);
6002 	sc->sc_icr = 0;
6003 	if (wm_is_using_msix(sc)) {
6004 		if (sc->sc_type != WM_T_82574) {
6005 			CSR_WRITE(sc, WMREG_EIMC, 0xffffffffU);
6006 			CSR_WRITE(sc, WMREG_EIAC, 0);
6007 		} else
6008 			CSR_WRITE(sc, WMREG_EIAC_82574, 0);
6009 	}
6010 
6011 	/* Release any queued transmit buffers. */
6012 	for (qidx = 0; qidx < sc->sc_nqueues; qidx++) {
6013 		struct wm_queue *wmq = &sc->sc_queue[qidx];
6014 		struct wm_txqueue *txq = &wmq->wmq_txq;
6015 		mutex_enter(txq->txq_lock);
6016 		txq->txq_sending = false; /* ensure watchdog disabled */
6017 		for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6018 			txs = &txq->txq_soft[i];
6019 			if (txs->txs_mbuf != NULL) {
6020 				bus_dmamap_unload(sc->sc_dmat,txs->txs_dmamap);
6021 				m_freem(txs->txs_mbuf);
6022 				txs->txs_mbuf = NULL;
6023 			}
6024 		}
6025 		mutex_exit(txq->txq_lock);
6026 	}
6027 
6028 	/* Mark the interface as down and cancel the watchdog timer. */
6029 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6030 
6031 	if (disable) {
6032 		for (i = 0; i < sc->sc_nqueues; i++) {
6033 			struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6034 			mutex_enter(rxq->rxq_lock);
6035 			wm_rxdrain(rxq);
6036 			mutex_exit(rxq->rxq_lock);
6037 		}
6038 	}
6039 
6040 #if 0 /* notyet */
6041 	if (sc->sc_type >= WM_T_82544)
6042 		CSR_WRITE(sc, WMREG_WUC, 0);
6043 #endif
6044 }
6045 
6046 static void
6047 wm_dump_mbuf_chain(struct wm_softc *sc, struct mbuf *m0)
6048 {
6049 	struct mbuf *m;
6050 	int i;
6051 
6052 	log(LOG_DEBUG, "%s: mbuf chain:\n", device_xname(sc->sc_dev));
6053 	for (m = m0, i = 0; m != NULL; m = m->m_next, i++)
6054 		log(LOG_DEBUG, "%s:\tm_data = %p, m_len = %d, "
6055 		    "m_flags = 0x%08x\n", device_xname(sc->sc_dev),
6056 		    m->m_data, m->m_len, m->m_flags);
6057 	log(LOG_DEBUG, "%s:\t%d mbuf%s in chain\n", device_xname(sc->sc_dev),
6058 	    i, i == 1 ? "" : "s");
6059 }
6060 
6061 /*
6062  * wm_82547_txfifo_stall:
6063  *
6064  *	Callout used to wait for the 82547 Tx FIFO to drain,
6065  *	reset the FIFO pointers, and restart packet transmission.
6066  */
6067 static void
6068 wm_82547_txfifo_stall(void *arg)
6069 {
6070 	struct wm_softc *sc = arg;
6071 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6072 
6073 	mutex_enter(txq->txq_lock);
6074 
6075 	if (txq->txq_stopping)
6076 		goto out;
6077 
6078 	if (txq->txq_fifo_stall) {
6079 		if (CSR_READ(sc, WMREG_TDT(0)) == CSR_READ(sc, WMREG_TDH(0)) &&
6080 		    CSR_READ(sc, WMREG_TDFT) == CSR_READ(sc, WMREG_TDFH) &&
6081 		    CSR_READ(sc, WMREG_TDFTS) == CSR_READ(sc, WMREG_TDFHS)) {
6082 			/*
6083 			 * Packets have drained.  Stop transmitter, reset
6084 			 * FIFO pointers, restart transmitter, and kick
6085 			 * the packet queue.
6086 			 */
6087 			uint32_t tctl = CSR_READ(sc, WMREG_TCTL);
6088 			CSR_WRITE(sc, WMREG_TCTL, tctl & ~TCTL_EN);
6089 			CSR_WRITE(sc, WMREG_TDFT, txq->txq_fifo_addr);
6090 			CSR_WRITE(sc, WMREG_TDFH, txq->txq_fifo_addr);
6091 			CSR_WRITE(sc, WMREG_TDFTS, txq->txq_fifo_addr);
6092 			CSR_WRITE(sc, WMREG_TDFHS, txq->txq_fifo_addr);
6093 			CSR_WRITE(sc, WMREG_TCTL, tctl);
6094 			CSR_WRITE_FLUSH(sc);
6095 
6096 			txq->txq_fifo_head = 0;
6097 			txq->txq_fifo_stall = 0;
6098 			wm_start_locked(&sc->sc_ethercom.ec_if);
6099 		} else {
6100 			/*
6101 			 * Still waiting for packets to drain; try again in
6102 			 * another tick.
6103 			 */
6104 			callout_schedule(&sc->sc_txfifo_ch, 1);
6105 		}
6106 	}
6107 
6108 out:
6109 	mutex_exit(txq->txq_lock);
6110 }
6111 
6112 /*
6113  * wm_82547_txfifo_bugchk:
6114  *
6115  *	Check for bug condition in the 82547 Tx FIFO.  We need to
6116  *	prevent enqueueing a packet that would wrap around the end
6117  *	of the Tx FIFO ring buffer, otherwise the chip will croak.
6118  *
6119  *	We do this by checking the amount of space before the end
6120  *	of the Tx FIFO buffer. If the packet will not fit, we "stall"
6121  *	the Tx FIFO, wait for all remaining packets to drain, reset
6122  *	the internal FIFO pointers to the beginning, and restart
6123  *	transmission on the interface.
6124  */
6125 #define	WM_FIFO_HDR		0x10
6126 #define	WM_82547_PAD_LEN	0x3e0
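/*
 * Illustrative arithmetic: a 1514-byte frame plus WM_FIFO_HDR (16 bytes)
 * rounds up to 1536 bytes of FIFO space, and wm_82547_txfifo_bugchk()
 * declares a stall once that length reaches WM_82547_PAD_LEN (0x3e0,
 * i.e. 992) plus the space remaining before the end of the FIFO.
 */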
6127 static int
6128 wm_82547_txfifo_bugchk(struct wm_softc *sc, struct mbuf *m0)
6129 {
6130 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
6131 	int space = txq->txq_fifo_size - txq->txq_fifo_head;
6132 	int len = roundup(m0->m_pkthdr.len + WM_FIFO_HDR, WM_FIFO_HDR);
6133 
6134 	/* Just return if already stalled. */
6135 	if (txq->txq_fifo_stall)
6136 		return 1;
6137 
6138 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
6139 		/* Stall only occurs in half-duplex mode. */
6140 		goto send_packet;
6141 	}
6142 
6143 	if (len >= WM_82547_PAD_LEN + space) {
6144 		txq->txq_fifo_stall = 1;
6145 		callout_schedule(&sc->sc_txfifo_ch, 1);
6146 		return 1;
6147 	}
6148 
6149  send_packet:
6150 	txq->txq_fifo_head += len;
6151 	if (txq->txq_fifo_head >= txq->txq_fifo_size)
6152 		txq->txq_fifo_head -= txq->txq_fifo_size;
6153 
6154 	return 0;
6155 }
6156 
6157 static int
6158 wm_alloc_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6159 {
6160 	int error;
6161 
6162 	/*
6163 	 * Allocate the control data structures, and create and load the
6164 	 * DMA map for it.
6165 	 *
6166 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6167 	 * memory.  So must Rx descriptors.  We simplify by allocating
6168 	 * both sets within the same 4G segment.
6169 	 */
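	/*
	 * The 4G constraint is enforced by the boundary argument
	 * (0x100000000) passed to bus_dmamem_alloc() below; see
	 * bus_dma(9): an allocated segment never crosses the boundary.
	 */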
6170 	if (sc->sc_type < WM_T_82544)
6171 		WM_NTXDESC(txq) = WM_NTXDESC_82542;
6172 	else
6173 		WM_NTXDESC(txq) = WM_NTXDESC_82544;
6174 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6175 		txq->txq_descsize = sizeof(nq_txdesc_t);
6176 	else
6177 		txq->txq_descsize = sizeof(wiseman_txdesc_t);
6178 
6179 	if ((error = bus_dmamem_alloc(sc->sc_dmat, WM_TXDESCS_SIZE(txq),
6180 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &txq->txq_desc_seg,
6181 		    1, &txq->txq_desc_rseg, 0)) != 0) {
6182 		aprint_error_dev(sc->sc_dev,
6183 		    "unable to allocate TX control data, error = %d\n",
6184 		    error);
6185 		goto fail_0;
6186 	}
6187 
6188 	if ((error = bus_dmamem_map(sc->sc_dmat, &txq->txq_desc_seg,
6189 		    txq->txq_desc_rseg, WM_TXDESCS_SIZE(txq),
6190 		    (void **)&txq->txq_descs_u, BUS_DMA_COHERENT)) != 0) {
6191 		aprint_error_dev(sc->sc_dev,
6192 		    "unable to map TX control data, error = %d\n", error);
6193 		goto fail_1;
6194 	}
6195 
6196 	if ((error = bus_dmamap_create(sc->sc_dmat, WM_TXDESCS_SIZE(txq), 1,
6197 		    WM_TXDESCS_SIZE(txq), 0, 0, &txq->txq_desc_dmamap)) != 0) {
6198 		aprint_error_dev(sc->sc_dev,
6199 		    "unable to create TX control data DMA map, error = %d\n",
6200 		    error);
6201 		goto fail_2;
6202 	}
6203 
6204 	if ((error = bus_dmamap_load(sc->sc_dmat, txq->txq_desc_dmamap,
6205 		    txq->txq_descs_u, WM_TXDESCS_SIZE(txq), NULL, 0)) != 0) {
6206 		aprint_error_dev(sc->sc_dev,
6207 		    "unable to load TX control data DMA map, error = %d\n",
6208 		    error);
6209 		goto fail_3;
6210 	}
6211 
6212 	return 0;
6213 
6214  fail_3:
6215 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6216  fail_2:
6217 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6218 	    WM_TXDESCS_SIZE(txq));
6219  fail_1:
6220 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6221  fail_0:
6222 	return error;
6223 }
6224 
6225 static void
6226 wm_free_tx_descs(struct wm_softc *sc, struct wm_txqueue *txq)
6227 {
6228 
6229 	bus_dmamap_unload(sc->sc_dmat, txq->txq_desc_dmamap);
6230 	bus_dmamap_destroy(sc->sc_dmat, txq->txq_desc_dmamap);
6231 	bus_dmamem_unmap(sc->sc_dmat, (void *)txq->txq_descs_u,
6232 	    WM_TXDESCS_SIZE(txq));
6233 	bus_dmamem_free(sc->sc_dmat, &txq->txq_desc_seg, txq->txq_desc_rseg);
6234 }
6235 
6236 static int
6237 wm_alloc_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6238 {
6239 	int error;
6240 	size_t rxq_descs_size;
6241 
6242 	/*
6243 	 * Allocate the control data structures, and create and load the
6244 	 * DMA map for it.
6245 	 *
6246 	 * NOTE: All Tx descriptors must be in the same 4G segment of
6247 	 * memory.  So must Rx descriptors.  We simplify by allocating
6248 	 * both sets within the same 4G segment.
6249 	 */
6250 	rxq->rxq_ndesc = WM_NRXDESC;
6251 	if (sc->sc_type == WM_T_82574)
6252 		rxq->rxq_descsize = sizeof(ext_rxdesc_t);
6253 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6254 		rxq->rxq_descsize = sizeof(nq_rxdesc_t);
6255 	else
6256 		rxq->rxq_descsize = sizeof(wiseman_rxdesc_t);
6257 	rxq_descs_size = rxq->rxq_descsize * rxq->rxq_ndesc;
6258 
6259 	if ((error = bus_dmamem_alloc(sc->sc_dmat, rxq_descs_size,
6260 		    PAGE_SIZE, (bus_size_t) 0x100000000ULL, &rxq->rxq_desc_seg,
6261 		    1, &rxq->rxq_desc_rseg, 0)) != 0) {
6262 		aprint_error_dev(sc->sc_dev,
6263 		    "unable to allocate RX control data, error = %d\n",
6264 		    error);
6265 		goto fail_0;
6266 	}
6267 
6268 	if ((error = bus_dmamem_map(sc->sc_dmat, &rxq->rxq_desc_seg,
6269 		    rxq->rxq_desc_rseg, rxq_descs_size,
6270 		    (void **)&rxq->rxq_descs_u, BUS_DMA_COHERENT)) != 0) {
6271 		aprint_error_dev(sc->sc_dev,
6272 		    "unable to map RX control data, error = %d\n", error);
6273 		goto fail_1;
6274 	}
6275 
6276 	if ((error = bus_dmamap_create(sc->sc_dmat, rxq_descs_size, 1,
6277 		    rxq_descs_size, 0, 0, &rxq->rxq_desc_dmamap)) != 0) {
6278 		aprint_error_dev(sc->sc_dev,
6279 		    "unable to create RX control data DMA map, error = %d\n",
6280 		    error);
6281 		goto fail_2;
6282 	}
6283 
6284 	if ((error = bus_dmamap_load(sc->sc_dmat, rxq->rxq_desc_dmamap,
6285 		    rxq->rxq_descs_u, rxq_descs_size, NULL, 0)) != 0) {
6286 		aprint_error_dev(sc->sc_dev,
6287 		    "unable to load RX control data DMA map, error = %d\n",
6288 		    error);
6289 		goto fail_3;
6290 	}
6291 
6292 	return 0;
6293 
6294  fail_3:
6295 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6296  fail_2:
6297 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6298 	    rxq_descs_size);
6299  fail_1:
6300 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6301  fail_0:
6302 	return error;
6303 }
6304 
6305 static void
6306 wm_free_rx_descs(struct wm_softc *sc, struct wm_rxqueue *rxq)
6307 {
6308 
6309 	bus_dmamap_unload(sc->sc_dmat, rxq->rxq_desc_dmamap);
6310 	bus_dmamap_destroy(sc->sc_dmat, rxq->rxq_desc_dmamap);
6311 	bus_dmamem_unmap(sc->sc_dmat, (void *)rxq->rxq_descs_u,
6312 	    rxq->rxq_descsize * rxq->rxq_ndesc);
6313 	bus_dmamem_free(sc->sc_dmat, &rxq->rxq_desc_seg, rxq->rxq_desc_rseg);
6314 }
6315 
6316 
6317 static int
6318 wm_alloc_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6319 {
6320 	int i, error;
6321 
6322 	/* Create the transmit buffer DMA maps. */
6323 	WM_TXQUEUELEN(txq) =
6324 	    (sc->sc_type == WM_T_82547 || sc->sc_type == WM_T_82547_2) ?
6325 	    WM_TXQUEUELEN_MAX_82547 : WM_TXQUEUELEN_MAX;
6326 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6327 		if ((error = bus_dmamap_create(sc->sc_dmat, WM_MAXTXDMA,
6328 			    WM_NTXSEGS, WTX_MAX_LEN, 0, 0,
6329 			    &txq->txq_soft[i].txs_dmamap)) != 0) {
6330 			aprint_error_dev(sc->sc_dev,
6331 			    "unable to create Tx DMA map %d, error = %d\n",
6332 			    i, error);
6333 			goto fail;
6334 		}
6335 	}
6336 
6337 	return 0;
6338 
6339  fail:
6340 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6341 		if (txq->txq_soft[i].txs_dmamap != NULL)
6342 			bus_dmamap_destroy(sc->sc_dmat,
6343 			    txq->txq_soft[i].txs_dmamap);
6344 	}
6345 	return error;
6346 }
6347 
6348 static void
6349 wm_free_tx_buffer(struct wm_softc *sc, struct wm_txqueue *txq)
6350 {
6351 	int i;
6352 
6353 	for (i = 0; i < WM_TXQUEUELEN(txq); i++) {
6354 		if (txq->txq_soft[i].txs_dmamap != NULL)
6355 			bus_dmamap_destroy(sc->sc_dmat,
6356 			    txq->txq_soft[i].txs_dmamap);
6357 	}
6358 }
6359 
6360 static int
6361 wm_alloc_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6362 {
6363 	int i, error;
6364 
6365 	/* Create the receive buffer DMA maps. */
6366 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6367 		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
6368 			    MCLBYTES, 0, 0,
6369 			    &rxq->rxq_soft[i].rxs_dmamap)) != 0) {
6370 			aprint_error_dev(sc->sc_dev,
6371 			    "unable to create Rx DMA map %d, error = %d\n",
6372 			    i, error);
6373 			goto fail;
6374 		}
6375 		rxq->rxq_soft[i].rxs_mbuf = NULL;
6376 	}
6377 
6378 	return 0;
6379 
6380  fail:
6381 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6382 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6383 			bus_dmamap_destroy(sc->sc_dmat,
6384 			    rxq->rxq_soft[i].rxs_dmamap);
6385 	}
6386 	return error;
6387 }
6388 
6389 static void
6390 wm_free_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6391 {
6392 	int i;
6393 
6394 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6395 		if (rxq->rxq_soft[i].rxs_dmamap != NULL)
6396 			bus_dmamap_destroy(sc->sc_dmat,
6397 			    rxq->rxq_soft[i].rxs_dmamap);
6398 	}
6399 }
6400 
6401 /*
6402  * wm_alloc_txrx_queues:
6403  *	Allocate {tx,rx}descs and {tx,rx} buffers
6404  */
6405 static int
6406 wm_alloc_txrx_queues(struct wm_softc *sc)
6407 {
6408 	int i, error, tx_done, rx_done;
6409 
6410 	sc->sc_queue = kmem_zalloc(sizeof(struct wm_queue) * sc->sc_nqueues,
6411 	    KM_SLEEP);
6412 	if (sc->sc_queue == NULL) {
6413 		aprint_error_dev(sc->sc_dev, "unable to allocate wm_queue\n");
6414 		error = ENOMEM;
6415 		goto fail_0;
6416 	}
6417 
6418 	/*
6419 	 * For transmission
6420 	 */
6421 	error = 0;
6422 	tx_done = 0;
6423 	for (i = 0; i < sc->sc_nqueues; i++) {
6424 #ifdef WM_EVENT_COUNTERS
6425 		int j;
6426 		const char *xname;
6427 #endif
6428 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6429 		txq->txq_sc = sc;
6430 		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6431 
6432 		error = wm_alloc_tx_descs(sc, txq);
6433 		if (error)
6434 			break;
6435 		error = wm_alloc_tx_buffer(sc, txq);
6436 		if (error) {
6437 			wm_free_tx_descs(sc, txq);
6438 			break;
6439 		}
6440 		txq->txq_interq = pcq_create(WM_TXINTERQSIZE, KM_SLEEP);
6441 		if (txq->txq_interq == NULL) {
6442 			wm_free_tx_descs(sc, txq);
6443 			wm_free_tx_buffer(sc, txq);
6444 			error = ENOMEM;
6445 			break;
6446 		}
6447 
6448 #ifdef WM_EVENT_COUNTERS
6449 		xname = device_xname(sc->sc_dev);
6450 
6451 		WM_Q_MISC_EVCNT_ATTACH(txq, txsstall, txq, i, xname);
6452 		WM_Q_MISC_EVCNT_ATTACH(txq, txdstall, txq, i, xname);
6453 		WM_Q_MISC_EVCNT_ATTACH(txq, txfifo_stall, txq, i, xname);
6454 		WM_Q_INTR_EVCNT_ATTACH(txq, txdw, txq, i, xname);
6455 		WM_Q_INTR_EVCNT_ATTACH(txq, txqe, txq, i, xname);
6456 
6457 		WM_Q_MISC_EVCNT_ATTACH(txq, txipsum, txq, i, xname);
6458 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum, txq, i, xname);
6459 		WM_Q_MISC_EVCNT_ATTACH(txq, txtusum6, txq, i, xname);
6460 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso, txq, i, xname);
6461 		WM_Q_MISC_EVCNT_ATTACH(txq, txtso6, txq, i, xname);
6462 		WM_Q_MISC_EVCNT_ATTACH(txq, txtsopain, txq, i, xname);
6463 
6464 		for (j = 0; j < WM_NTXSEGS; j++) {
6465 			snprintf(txq->txq_txseg_evcnt_names[j],
6466 			    sizeof(txq->txq_txseg_evcnt_names[j]), "txq%02dtxseg%d", i, j);
6467 			evcnt_attach_dynamic(&txq->txq_ev_txseg[j], EVCNT_TYPE_MISC,
6468 			    NULL, xname, txq->txq_txseg_evcnt_names[j]);
6469 		}
6470 
6471 		WM_Q_MISC_EVCNT_ATTACH(txq, txdrop, txq, i, xname);
6472 
6473 		WM_Q_MISC_EVCNT_ATTACH(txq, tu, txq, i, xname);
6474 #endif /* WM_EVENT_COUNTERS */
6475 
6476 		tx_done++;
6477 	}
6478 	if (error)
6479 		goto fail_1;
6480 
6481 	/*
6482 	 * For receive
6483 	 */
6484 	error = 0;
6485 	rx_done = 0;
6486 	for (i = 0; i < sc->sc_nqueues; i++) {
6487 #ifdef WM_EVENT_COUNTERS
6488 		const char *xname;
6489 #endif
6490 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6491 		rxq->rxq_sc = sc;
6492 		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
6493 
6494 		error = wm_alloc_rx_descs(sc, rxq);
6495 		if (error)
6496 			break;
6497 
6498 		error = wm_alloc_rx_buffer(sc, rxq);
6499 		if (error) {
6500 			wm_free_rx_descs(sc, rxq);
6501 			break;
6502 		}
6503 
6504 #ifdef WM_EVENT_COUNTERS
6505 		xname = device_xname(sc->sc_dev);
6506 
6507 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxintr, rxq, i, xname);
6508 		WM_Q_INTR_EVCNT_ATTACH(rxq, rxdefer, rxq, i, xname);
6509 
6510 		WM_Q_MISC_EVCNT_ATTACH(rxq, rxipsum, rxq, i, xname);
6511 		WM_Q_MISC_EVCNT_ATTACH(rxq, rxtusum, rxq, i, xname);
6512 #endif /* WM_EVENT_COUNTERS */
6513 
6514 		rx_done++;
6515 	}
6516 	if (error)
6517 		goto fail_2;
6518 
6519 	return 0;
6520 
6521  fail_2:
6522 	for (i = 0; i < rx_done; i++) {
6523 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6524 		wm_free_rx_buffer(sc, rxq);
6525 		wm_free_rx_descs(sc, rxq);
6526 		if (rxq->rxq_lock)
6527 			mutex_obj_free(rxq->rxq_lock);
6528 	}
6529  fail_1:
6530 	for (i = 0; i < tx_done; i++) {
6531 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6532 		pcq_destroy(txq->txq_interq);
6533 		wm_free_tx_buffer(sc, txq);
6534 		wm_free_tx_descs(sc, txq);
6535 		if (txq->txq_lock)
6536 			mutex_obj_free(txq->txq_lock);
6537 	}
6538 
6539 	kmem_free(sc->sc_queue,
6540 	    sizeof(struct wm_queue) * sc->sc_nqueues);
6541  fail_0:
6542 	return error;
6543 }
6544 
6545 /*
6546  * wm_free_txrx_queues:
6547  *	Free {tx,rx}descs and {tx,rx} buffers
6548  */
6549 static void
6550 wm_free_txrx_queues(struct wm_softc *sc)
6551 {
6552 	int i;
6553 
6554 	for (i = 0; i < sc->sc_nqueues; i++) {
6555 		struct wm_rxqueue *rxq = &sc->sc_queue[i].wmq_rxq;
6556 
6557 #ifdef WM_EVENT_COUNTERS
6558 		WM_Q_EVCNT_DETACH(rxq, rxintr, rxq, i);
6559 		WM_Q_EVCNT_DETACH(rxq, rxdefer, rxq, i);
6560 		WM_Q_EVCNT_DETACH(rxq, rxipsum, rxq, i);
6561 		WM_Q_EVCNT_DETACH(rxq, rxtusum, rxq, i);
6562 #endif /* WM_EVENT_COUNTERS */
6563 
6564 		wm_free_rx_buffer(sc, rxq);
6565 		wm_free_rx_descs(sc, rxq);
6566 		if (rxq->rxq_lock)
6567 			mutex_obj_free(rxq->rxq_lock);
6568 	}
6569 
6570 	for (i = 0; i < sc->sc_nqueues; i++) {
6571 		struct wm_txqueue *txq = &sc->sc_queue[i].wmq_txq;
6572 		struct mbuf *m;
6573 #ifdef WM_EVENT_COUNTERS
6574 		int j;
6575 
6576 		WM_Q_EVCNT_DETACH(txq, txsstall, txq, i);
6577 		WM_Q_EVCNT_DETACH(txq, txdstall, txq, i);
6578 		WM_Q_EVCNT_DETACH(txq, txfifo_stall, txq, i);
6579 		WM_Q_EVCNT_DETACH(txq, txdw, txq, i);
6580 		WM_Q_EVCNT_DETACH(txq, txqe, txq, i);
6581 		WM_Q_EVCNT_DETACH(txq, txipsum, txq, i);
6582 		WM_Q_EVCNT_DETACH(txq, txtusum, txq, i);
6583 		WM_Q_EVCNT_DETACH(txq, txtusum6, txq, i);
6584 		WM_Q_EVCNT_DETACH(txq, txtso, txq, i);
6585 		WM_Q_EVCNT_DETACH(txq, txtso6, txq, i);
6586 		WM_Q_EVCNT_DETACH(txq, txtsopain, txq, i);
6587 
6588 		for (j = 0; j < WM_NTXSEGS; j++)
6589 			evcnt_detach(&txq->txq_ev_txseg[j]);
6590 
6591 		WM_Q_EVCNT_DETACH(txq, txdrop, txq, i);
6592 		WM_Q_EVCNT_DETACH(txq, tu, txq, i);
6593 #endif /* WM_EVENT_COUNTERS */
6594 
6595 		/* drain txq_interq */
6596 		while ((m = pcq_get(txq->txq_interq)) != NULL)
6597 			m_freem(m);
6598 		pcq_destroy(txq->txq_interq);
6599 
6600 		wm_free_tx_buffer(sc, txq);
6601 		wm_free_tx_descs(sc, txq);
6602 		if (txq->txq_lock)
6603 			mutex_obj_free(txq->txq_lock);
6604 	}
6605 
6606 	kmem_free(sc->sc_queue, sizeof(struct wm_queue) * sc->sc_nqueues);
6607 }
6608 
6609 static void
6610 wm_init_tx_descs(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6611 {
6612 
6613 	KASSERT(mutex_owned(txq->txq_lock));
6614 
6615 	/* Initialize the transmit descriptor ring. */
6616 	memset(txq->txq_descs, 0, WM_TXDESCS_SIZE(txq));
6617 	wm_cdtxsync(txq, 0, WM_NTXDESC(txq),
6618 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
6619 	txq->txq_free = WM_NTXDESC(txq);
6620 	txq->txq_next = 0;
6621 }
6622 
6623 static void
6624 wm_init_tx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6625     struct wm_txqueue *txq)
6626 {
6627 
6628 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6629 		device_xname(sc->sc_dev), __func__));
6630 	KASSERT(mutex_owned(txq->txq_lock));
6631 
6632 	if (sc->sc_type < WM_T_82543) {
6633 		CSR_WRITE(sc, WMREG_OLD_TDBAH, WM_CDTXADDR_HI(txq, 0));
6634 		CSR_WRITE(sc, WMREG_OLD_TDBAL, WM_CDTXADDR_LO(txq, 0));
6635 		CSR_WRITE(sc, WMREG_OLD_TDLEN, WM_TXDESCS_SIZE(txq));
6636 		CSR_WRITE(sc, WMREG_OLD_TDH, 0);
6637 		CSR_WRITE(sc, WMREG_OLD_TDT, 0);
6638 		CSR_WRITE(sc, WMREG_OLD_TIDV, 128);
6639 	} else {
6640 		int qid = wmq->wmq_id;
6641 
6642 		CSR_WRITE(sc, WMREG_TDBAH(qid), WM_CDTXADDR_HI(txq, 0));
6643 		CSR_WRITE(sc, WMREG_TDBAL(qid), WM_CDTXADDR_LO(txq, 0));
6644 		CSR_WRITE(sc, WMREG_TDLEN(qid), WM_TXDESCS_SIZE(txq));
6645 		CSR_WRITE(sc, WMREG_TDH(qid), 0);
6646 
6647 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
6648 			/*
6649 			 * Don't write TDT before TCTL.EN is set.
6650 			 * See the documentation.
6651 			 */
6652 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_QUEUE_ENABLE
6653 			    | TXDCTL_PTHRESH(0) | TXDCTL_HTHRESH(0)
6654 			    | TXDCTL_WTHRESH(0));
6655 		else {
6656 			/* XXX should update with AIM? */
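			/*
			 * ITR counts in 256ns units while TIDV/TADV count
			 * in 1024ns units (see the throttling comment in
			 * wm_init_locked()), hence the divide by 4.
			 */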
6657 			CSR_WRITE(sc, WMREG_TIDV, wmq->wmq_itr / 4);
6658 			if (sc->sc_type >= WM_T_82540) {
6659 				/* should be same */
6660 				CSR_WRITE(sc, WMREG_TADV, wmq->wmq_itr / 4);
6661 			}
6662 
6663 			CSR_WRITE(sc, WMREG_TDT(qid), 0);
6664 			CSR_WRITE(sc, WMREG_TXDCTL(qid), TXDCTL_PTHRESH(0) |
6665 			    TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0));
6666 		}
6667 	}
6668 }
6669 
6670 static void
6671 wm_init_tx_buffer(struct wm_softc *sc __unused, struct wm_txqueue *txq)
6672 {
6673 	int i;
6674 
6675 	KASSERT(mutex_owned(txq->txq_lock));
6676 
6677 	/* Initialize the transmit job descriptors. */
6678 	for (i = 0; i < WM_TXQUEUELEN(txq); i++)
6679 		txq->txq_soft[i].txs_mbuf = NULL;
6680 	txq->txq_sfree = WM_TXQUEUELEN(txq);
6681 	txq->txq_snext = 0;
6682 	txq->txq_sdirty = 0;
6683 }
6684 
6685 static void
6686 wm_init_tx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6687     struct wm_txqueue *txq)
6688 {
6689 
6690 	KASSERT(mutex_owned(txq->txq_lock));
6691 
6692 	/*
6693 	 * Set up some register offsets that are different between
6694 	 * the i82542 and the i82543 and later chips.
6695 	 */
6696 	if (sc->sc_type < WM_T_82543)
6697 		txq->txq_tdt_reg = WMREG_OLD_TDT;
6698 	else
6699 		txq->txq_tdt_reg = WMREG_TDT(wmq->wmq_id);
6700 
6701 	wm_init_tx_descs(sc, txq);
6702 	wm_init_tx_regs(sc, wmq, txq);
6703 	wm_init_tx_buffer(sc, txq);
6704 
6705 	txq->txq_flags = 0; /* Clear WM_TXQ_NO_SPACE */
6706 	txq->txq_sending = false;
6707 }
6708 
6709 static void
6710 wm_init_rx_regs(struct wm_softc *sc, struct wm_queue *wmq,
6711     struct wm_rxqueue *rxq)
6712 {
6713 
6714 	KASSERT(mutex_owned(rxq->rxq_lock));
6715 
6716 	/*
6717 	 * Initialize the receive descriptor and receive job
6718 	 * descriptor rings.
6719 	 */
6720 	if (sc->sc_type < WM_T_82543) {
6721 		CSR_WRITE(sc, WMREG_OLD_RDBAH0, WM_CDRXADDR_HI(rxq, 0));
6722 		CSR_WRITE(sc, WMREG_OLD_RDBAL0, WM_CDRXADDR_LO(rxq, 0));
6723 		CSR_WRITE(sc, WMREG_OLD_RDLEN0,
6724 		    rxq->rxq_descsize * rxq->rxq_ndesc);
6725 		CSR_WRITE(sc, WMREG_OLD_RDH0, 0);
6726 		CSR_WRITE(sc, WMREG_OLD_RDT0, 0);
6727 		CSR_WRITE(sc, WMREG_OLD_RDTR0, 28 | RDTR_FPD);
6728 
6729 		CSR_WRITE(sc, WMREG_OLD_RDBA1_HI, 0);
6730 		CSR_WRITE(sc, WMREG_OLD_RDBA1_LO, 0);
6731 		CSR_WRITE(sc, WMREG_OLD_RDLEN1, 0);
6732 		CSR_WRITE(sc, WMREG_OLD_RDH1, 0);
6733 		CSR_WRITE(sc, WMREG_OLD_RDT1, 0);
6734 		CSR_WRITE(sc, WMREG_OLD_RDTR1, 0);
6735 	} else {
6736 		int qid = wmq->wmq_id;
6737 
6738 		CSR_WRITE(sc, WMREG_RDBAH(qid), WM_CDRXADDR_HI(rxq, 0));
6739 		CSR_WRITE(sc, WMREG_RDBAL(qid), WM_CDRXADDR_LO(rxq, 0));
6740 		CSR_WRITE(sc, WMREG_RDLEN(qid),
6741 		    rxq->rxq_descsize * rxq->rxq_ndesc);
6742 
6743 		if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
6744 			if (MCLBYTES & ((1 << SRRCTL_BSIZEPKT_SHIFT) - 1))
6745 				panic("%s: MCLBYTES %d unsupported for 82575 or higher\n", __func__, MCLBYTES);
6746 
6747 			/* Currently, support SRRCTL_DESCTYPE_ADV_ONEBUF only. */
6748 			CSR_WRITE(sc, WMREG_SRRCTL(qid), SRRCTL_DESCTYPE_ADV_ONEBUF
6749 			    | (MCLBYTES >> SRRCTL_BSIZEPKT_SHIFT));
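			/*
			 * Assuming BSIZEPKT is in 1KB units (a shift of 10),
			 * the default MCLBYTES of 2048 programs a 2KB packet
			 * buffer here.
			 */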
6750 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_QUEUE_ENABLE
6751 			    | RXDCTL_PTHRESH(16) | RXDCTL_HTHRESH(8)
6752 			    | RXDCTL_WTHRESH(1));
6753 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6754 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6755 		} else {
6756 			CSR_WRITE(sc, WMREG_RDH(qid), 0);
6757 			CSR_WRITE(sc, WMREG_RDT(qid), 0);
6758 			/* XXX should update with AIM? */
6759 			CSR_WRITE(sc, WMREG_RDTR,
6760 			    (wmq->wmq_itr / 4) | RDTR_FPD);
6761 			/* MUST be same */
6762 			CSR_WRITE(sc, WMREG_RADV, wmq->wmq_itr / 4);
6763 			CSR_WRITE(sc, WMREG_RXDCTL(qid), RXDCTL_PTHRESH(0) |
6764 			    RXDCTL_HTHRESH(0) | RXDCTL_WTHRESH(1));
6765 		}
6766 	}
6767 }
6768 
6769 static int
6770 wm_init_rx_buffer(struct wm_softc *sc, struct wm_rxqueue *rxq)
6771 {
6772 	struct wm_rxsoft *rxs;
6773 	int error, i;
6774 
6775 	KASSERT(mutex_owned(rxq->rxq_lock));
6776 
6777 	for (i = 0; i < rxq->rxq_ndesc; i++) {
6778 		rxs = &rxq->rxq_soft[i];
6779 		if (rxs->rxs_mbuf == NULL) {
6780 			if ((error = wm_add_rxbuf(rxq, i)) != 0) {
6781 				log(LOG_ERR, "%s: unable to allocate or map "
6782 				    "rx buffer %d, error = %d\n",
6783 				    device_xname(sc->sc_dev), i, error);
6784 				/*
6785 				 * XXX Should attempt to run with fewer receive
6786 				 * XXX buffers instead of just failing.
6787 				 */
6788 				wm_rxdrain(rxq);
6789 				return ENOMEM;
6790 			}
6791 		} else {
6792 			/*
6793 			 * For 82575 and 82576, the RX descriptors must be
6794 			 * initialized after the setting of RCTL.EN in
6795 			 * wm_set_filter()
6796 			 */
6797 			if ((sc->sc_flags & WM_F_NEWQUEUE) == 0)
6798 				wm_init_rxdesc(rxq, i);
6799 		}
6800 	}
6801 	rxq->rxq_ptr = 0;
6802 	rxq->rxq_discard = 0;
6803 	WM_RXCHAIN_RESET(rxq);
6804 
6805 	return 0;
6806 }
6807 
6808 static int
6809 wm_init_rx_queue(struct wm_softc *sc, struct wm_queue *wmq,
6810     struct wm_rxqueue *rxq)
6811 {
6812 
6813 	KASSERT(mutex_owned(rxq->rxq_lock));
6814 
6815 	/*
6816 	 * Set up some register offsets that are different between
6817 	 * the i82542 and the i82543 and later chips.
6818 	 */
6819 	if (sc->sc_type < WM_T_82543)
6820 		rxq->rxq_rdt_reg = WMREG_OLD_RDT0;
6821 	else
6822 		rxq->rxq_rdt_reg = WMREG_RDT(wmq->wmq_id);
6823 
6824 	wm_init_rx_regs(sc, wmq, rxq);
6825 	return wm_init_rx_buffer(sc, rxq);
6826 }
6827 
6828 /*
6829  * wm_init_txrx_queues:
6830  *	Initialize {tx,rx}descs and {tx,rx} buffers
6831  */
6832 static int
6833 wm_init_txrx_queues(struct wm_softc *sc)
6834 {
6835 	int i, error = 0;
6836 
6837 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
6838 		device_xname(sc->sc_dev), __func__));
6839 
6840 	for (i = 0; i < sc->sc_nqueues; i++) {
6841 		struct wm_queue *wmq = &sc->sc_queue[i];
6842 		struct wm_txqueue *txq = &wmq->wmq_txq;
6843 		struct wm_rxqueue *rxq = &wmq->wmq_rxq;
6844 
6845 		/*
6846 		 * TODO
6847 		 * Currently, use a constant value instead of AIM.
6848 		 * Furthermore, the interrupt interval of a multiqueue setup,
6849 		 * which uses polling mode, is lower than the default value.
6850 		 * More tuning and AIM are required.
6851 		 */
6852 		if (wm_is_using_multiqueue(sc))
6853 			wmq->wmq_itr = 50;
6854 		else
6855 			wmq->wmq_itr = sc->sc_itr_init;
6856 		wmq->wmq_set_itr = true;
6857 
6858 		mutex_enter(txq->txq_lock);
6859 		wm_init_tx_queue(sc, wmq, txq);
6860 		mutex_exit(txq->txq_lock);
6861 
6862 		mutex_enter(rxq->rxq_lock);
6863 		error = wm_init_rx_queue(sc, wmq, rxq);
6864 		mutex_exit(rxq->rxq_lock);
6865 		if (error)
6866 			break;
6867 	}
6868 
6869 	return error;
6870 }
6871 
6872 /*
6873  * wm_tx_offload:
6874  *
6875  *	Set up TCP/IP checksumming parameters for the
6876  *	specified packet.
6877  */
6878 static int
6879 wm_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
6880     struct wm_txsoft *txs, uint32_t *cmdp, uint8_t *fieldsp)
6881 {
6882 	struct mbuf *m0 = txs->txs_mbuf;
6883 	struct livengood_tcpip_ctxdesc *t;
6884 	uint32_t ipcs, tucs, cmd, cmdlen, seg;
6885 	uint32_t ipcse;
6886 	struct ether_header *eh;
6887 	int offset, iphl;
6888 	uint8_t fields;
6889 
6890 	/*
6891 	 * XXX It would be nice if the mbuf pkthdr had offset
6892 	 * fields for the protocol headers.
6893 	 */
6894 
6895 	eh = mtod(m0, struct ether_header *);
6896 	switch (htons(eh->ether_type)) {
6897 	case ETHERTYPE_IP:
6898 	case ETHERTYPE_IPV6:
6899 		offset = ETHER_HDR_LEN;
6900 		break;
6901 
6902 	case ETHERTYPE_VLAN:
6903 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
6904 		break;
6905 
6906 	default:
6907 		/*
6908 		 * Don't support this protocol or encapsulation.
6909 		 */
6910 		*fieldsp = 0;
6911 		*cmdp = 0;
6912 		return 0;
6913 	}
6914 
6915 	if ((m0->m_pkthdr.csum_flags &
6916 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
6917 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
6918 	} else {
6919 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
6920 	}
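	/*
	 * IPCSE is the (inclusive) offset of the last byte covered by
	 * the IP header checksum.
	 */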
6921 	ipcse = offset + iphl - 1;
6922 
6923 	cmd = WTX_CMD_DEXT | WTX_DTYP_D;
6924 	cmdlen = WTX_CMD_DEXT | WTX_DTYP_C | WTX_CMD_IDE;
6925 	seg = 0;
6926 	fields = 0;
6927 
6928 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
6929 		int hlen = offset + iphl;
6930 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
6931 
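		/*
		 * For TSO, the hardware regenerates the IP total length
		 * and the TCP checksum of each segment, so prime the
		 * headers here: zero the length field and seed th_sum
		 * with the pseudo-header checksum that excludes the
		 * length.
		 */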
6932 		if (__predict_false(m0->m_len <
6933 				    (hlen + sizeof(struct tcphdr)))) {
6934 			/*
6935 			 * TCP/IP headers are not in the first mbuf; we need
6936 			 * to do this the slow and painful way. Let's just
6937 			 * hope this doesn't happen very often.
6938 			 */
6939 			struct tcphdr th;
6940 
6941 			WM_Q_EVCNT_INCR(txq, txtsopain);
6942 
6943 			m_copydata(m0, hlen, sizeof(th), &th);
6944 			if (v4) {
6945 				struct ip ip;
6946 
6947 				m_copydata(m0, offset, sizeof(ip), &ip);
6948 				ip.ip_len = 0;
6949 				m_copyback(m0,
6950 				    offset + offsetof(struct ip, ip_len),
6951 				    sizeof(ip.ip_len), &ip.ip_len);
6952 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
6953 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
6954 			} else {
6955 				struct ip6_hdr ip6;
6956 
6957 				m_copydata(m0, offset, sizeof(ip6), &ip6);
6958 				ip6.ip6_plen = 0;
6959 				m_copyback(m0,
6960 				    offset + offsetof(struct ip6_hdr, ip6_plen),
6961 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
6962 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
6963 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
6964 			}
6965 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
6966 			    sizeof(th.th_sum), &th.th_sum);
6967 
6968 			hlen += th.th_off << 2;
6969 		} else {
6970 			/*
6971 			 * TCP/IP headers are in the first mbuf; we can do
6972 			 * this the easy way.
6973 			 */
6974 			struct tcphdr *th;
6975 
6976 			if (v4) {
6977 				struct ip *ip =
6978 				    (void *)(mtod(m0, char *) + offset);
6979 				th = (void *)(mtod(m0, char *) + hlen);
6980 
6981 				ip->ip_len = 0;
6982 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
6983 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
6984 			} else {
6985 				struct ip6_hdr *ip6 =
6986 				    (void *)(mtod(m0, char *) + offset);
6987 				th = (void *)(mtod(m0, char *) + hlen);
6988 
6989 				ip6->ip6_plen = 0;
6990 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
6991 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
6992 			}
6993 			hlen += th->th_off << 2;
6994 		}
6995 
6996 		if (v4) {
6997 			WM_Q_EVCNT_INCR(txq, txtso);
6998 			cmdlen |= WTX_TCPIP_CMD_IP;
6999 		} else {
7000 			WM_Q_EVCNT_INCR(txq, txtso6);
7001 			ipcse = 0;
7002 		}
7003 		cmd |= WTX_TCPIP_CMD_TSE;
7004 		cmdlen |= WTX_TCPIP_CMD_TSE |
7005 		    WTX_TCPIP_CMD_TCP | (m0->m_pkthdr.len - hlen);
7006 		seg = WTX_TCPIP_SEG_HDRLEN(hlen) |
7007 		    WTX_TCPIP_SEG_MSS(m0->m_pkthdr.segsz);
7008 	}
7009 
7010 	/*
7011 	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
7012 	 * offload feature, if we load the context descriptor, we
7013 	 * MUST provide valid values for IPCSS and TUCSS fields.
7014 	 */
7015 
7016 	ipcs = WTX_TCPIP_IPCSS(offset) |
7017 	    WTX_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
7018 	    WTX_TCPIP_IPCSE(ipcse);
7019 	if (m0->m_pkthdr.csum_flags & (M_CSUM_IPv4 | M_CSUM_TSOv4)) {
7020 		WM_Q_EVCNT_INCR(txq, txipsum);
7021 		fields |= WTX_IXSM;
7022 	}
7023 
7024 	offset += iphl;
7025 
7026 	if (m0->m_pkthdr.csum_flags &
7027 	    (M_CSUM_TCPv4 | M_CSUM_UDPv4 | M_CSUM_TSOv4)) {
7028 		WM_Q_EVCNT_INCR(txq, txtusum);
7029 		fields |= WTX_TXSM;
7030 		tucs = WTX_TCPIP_TUCSS(offset) |
7031 		    WTX_TCPIP_TUCSO(offset +
7032 			M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
7033 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
7034 	} else if ((m0->m_pkthdr.csum_flags &
7035 	    (M_CSUM_TCPv6 | M_CSUM_UDPv6 | M_CSUM_TSOv6)) != 0) {
7036 		WM_Q_EVCNT_INCR(txq, txtusum6);
7037 		fields |= WTX_TXSM;
7038 		tucs = WTX_TCPIP_TUCSS(offset) |
7039 		    WTX_TCPIP_TUCSO(offset +
7040 			M_CSUM_DATA_IPv6_OFFSET(m0->m_pkthdr.csum_data)) |
7041 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
7042 	} else {
7043 		/* Just initialize it to a valid TCP context. */
7044 		tucs = WTX_TCPIP_TUCSS(offset) |
7045 		    WTX_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
7046 		    WTX_TCPIP_TUCSE(0) /* rest of packet */;
7047 	}
7048 
	/*
	 * We don't have to write a context descriptor for every packet,
	 * except on the 82574: there, a context descriptor must be
	 * written for every packet when two descriptor queues are used.
	 * Writing a context descriptor for every packet costs some
	 * overhead, but it does not cause problems.
	 */
7056 	/* Fill in the context descriptor. */
7057 	t = (struct livengood_tcpip_ctxdesc *)
7058 	    &txq->txq_descs[txq->txq_next];
7059 	t->tcpip_ipcs = htole32(ipcs);
7060 	t->tcpip_tucs = htole32(tucs);
7061 	t->tcpip_cmdlen = htole32(cmdlen);
7062 	t->tcpip_seg = htole32(seg);
7063 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7064 
7065 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7066 	txs->txs_ndesc++;
7067 
7068 	*cmdp = cmd;
7069 	*fieldsp = fields;
7070 
7071 	return 0;
7072 }
7073 
7074 static inline int
7075 wm_select_txqueue(struct ifnet *ifp, struct mbuf *m)
7076 {
7077 	struct wm_softc *sc = ifp->if_softc;
7078 	u_int cpuid = cpu_index(curcpu());
7079 
	/*
	 * Currently, a simple CPU-based distribution strategy.
	 * TODO:
	 * Distribute by flowid (RSS hash value).
	 */
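	/*
	 * Adding ncpu keeps the dividend non-negative (assuming
	 * sc_affinity_offset never exceeds ncpu), so the modulo always
	 * yields a valid queue id in [0, sc_nqueues).
	 */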
7085 	return (cpuid + ncpu - sc->sc_affinity_offset) % sc->sc_nqueues;
7086 }
7087 
7088 /*
7089  * wm_start:		[ifnet interface function]
7090  *
7091  *	Start packet transmission on the interface.
7092  */
7093 static void
7094 wm_start(struct ifnet *ifp)
7095 {
7096 	struct wm_softc *sc = ifp->if_softc;
7097 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7098 
7099 #ifdef WM_MPSAFE
7100 	KASSERT(if_is_mpsafe(ifp));
7101 #endif
7102 	/*
7103 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7104 	 */
7105 
7106 	mutex_enter(txq->txq_lock);
7107 	if (!txq->txq_stopping)
7108 		wm_start_locked(ifp);
7109 	mutex_exit(txq->txq_lock);
7110 }
7111 
7112 static void
7113 wm_start_locked(struct ifnet *ifp)
7114 {
7115 	struct wm_softc *sc = ifp->if_softc;
7116 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7117 
7118 	wm_send_common_locked(ifp, txq, false);
7119 }
7120 
7121 static int
7122 wm_transmit(struct ifnet *ifp, struct mbuf *m)
7123 {
7124 	int qid;
7125 	struct wm_softc *sc = ifp->if_softc;
7126 	struct wm_txqueue *txq;
7127 
7128 	qid = wm_select_txqueue(ifp, m);
7129 	txq = &sc->sc_queue[qid].wmq_txq;
7130 
7131 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7132 		m_freem(m);
7133 		WM_Q_EVCNT_INCR(txq, txdrop);
7134 		return ENOBUFS;
7135 	}
7136 
	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
7140 	ifp->if_obytes += m->m_pkthdr.len;
7141 	if (m->m_flags & M_MCAST)
7142 		ifp->if_omcasts++;
7143 
7144 	if (mutex_tryenter(txq->txq_lock)) {
7145 		if (!txq->txq_stopping)
7146 			wm_transmit_locked(ifp, txq);
7147 		mutex_exit(txq->txq_lock);
7148 	}
7149 
7150 	return 0;
7151 }
7152 
7153 static void
7154 wm_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7155 {
7156 
7157 	wm_send_common_locked(ifp, txq, true);
7158 }
7159 
7160 static void
7161 wm_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7162     bool is_transmit)
7163 {
7164 	struct wm_softc *sc = ifp->if_softc;
7165 	struct mbuf *m0;
7166 	struct wm_txsoft *txs;
7167 	bus_dmamap_t dmamap;
7168 	int error, nexttx, lasttx = -1, ofree, seg, segs_needed, use_tso;
7169 	bus_addr_t curaddr;
7170 	bus_size_t seglen, curlen;
7171 	uint32_t cksumcmd;
7172 	uint8_t cksumfields;
7173 
7174 	KASSERT(mutex_owned(txq->txq_lock));
7175 
7176 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7177 		return;
7178 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7179 		return;
7180 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7181 		return;
7182 
7183 	/* Remember the previous number of free descriptors. */
7184 	ofree = txq->txq_free;
7185 
7186 	/*
7187 	 * Loop through the send queue, setting up transmit descriptors
7188 	 * until we drain the queue, or use up all available transmit
7189 	 * descriptors.
7190 	 */
7191 	for (;;) {
7192 		m0 = NULL;
7193 
7194 		/* Get a work queue entry. */
7195 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7196 			wm_txeof(txq, UINT_MAX);
7197 			if (txq->txq_sfree == 0) {
7198 				DPRINTF(WM_DEBUG_TX,
7199 				    ("%s: TX: no free job descriptors\n",
7200 					device_xname(sc->sc_dev)));
7201 				WM_Q_EVCNT_INCR(txq, txsstall);
7202 				break;
7203 			}
7204 		}
7205 
7206 		/* Grab a packet off the queue. */
7207 		if (is_transmit)
7208 			m0 = pcq_get(txq->txq_interq);
7209 		else
7210 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7211 		if (m0 == NULL)
7212 			break;
7213 
7214 		DPRINTF(WM_DEBUG_TX,
7215 		    ("%s: TX: have packet to transmit: %p\n",
7216 			device_xname(sc->sc_dev), m0));
7217 
7218 		txs = &txq->txq_soft[txq->txq_snext];
7219 		dmamap = txs->txs_dmamap;
7220 
7221 		use_tso = (m0->m_pkthdr.csum_flags &
7222 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0;
7223 
7224 		/*
7225 		 * So says the Linux driver:
7226 		 * The controller does a simple calculation to make sure
7227 		 * there is enough room in the FIFO before initiating the
7228 		 * DMA for each buffer. The calc is:
7229 		 *	4 = ceil(buffer len / MSS)
7230 		 * To make sure we don't overrun the FIFO, adjust the max
7231 		 * buffer len if the MSS drops.
7232 		 */
7233 		dmamap->dm_maxsegsz =
7234 		    (use_tso && (m0->m_pkthdr.segsz << 2) < WTX_MAX_LEN)
7235 		    ? m0->m_pkthdr.segsz << 2
7236 		    : WTX_MAX_LEN;
7237 
7238 		/*
7239 		 * Load the DMA map.  If this fails, the packet either
7240 		 * didn't fit in the allotted number of segments, or we
7241 		 * were short on resources.  For the too-many-segments
7242 		 * case, we simply report an error and drop the packet,
7243 		 * since we can't sanely copy a jumbo packet to a single
7244 		 * buffer.
7245 		 */
7246 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7247 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7248 		if (error) {
7249 			if (error == EFBIG) {
7250 				WM_Q_EVCNT_INCR(txq, txdrop);
7251 				log(LOG_ERR, "%s: Tx packet consumes too many "
7252 				    "DMA segments, dropping...\n",
7253 				    device_xname(sc->sc_dev));
7254 				wm_dump_mbuf_chain(sc, m0);
7255 				m_freem(m0);
7256 				continue;
7257 			}
			/* Short on resources, just stop for now. */
7259 			DPRINTF(WM_DEBUG_TX,
7260 			    ("%s: TX: dmamap load failed: %d\n",
7261 				device_xname(sc->sc_dev), error));
7262 			break;
7263 		}
7264 
7265 		segs_needed = dmamap->dm_nsegs;
7266 		if (use_tso) {
7267 			/* For sentinel descriptor; see below. */
7268 			segs_needed++;
7269 		}
7270 
7271 		/*
7272 		 * Ensure we have enough descriptors free to describe
7273 		 * the packet. Note, we always reserve one descriptor
7274 		 * at the end of the ring due to the semantics of the
7275 		 * TDT register, plus one more in the event we need
7276 		 * to load offload context.
7277 		 */
7278 		if (segs_needed > txq->txq_free - 2) {
7279 			/*
7280 			 * Not enough free descriptors to transmit this
7281 			 * packet.  We haven't committed anything yet,
7282 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
7284 			 * layer that there are no more slots left.
7285 			 */
7286 			DPRINTF(WM_DEBUG_TX,
7287 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7288 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
7289 				segs_needed, txq->txq_free - 1));
7290 			if (!is_transmit)
7291 				ifp->if_flags |= IFF_OACTIVE;
7292 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7293 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7294 			WM_Q_EVCNT_INCR(txq, txdstall);
7295 			break;
7296 		}
7297 
7298 		/*
7299 		 * Check for 82547 Tx FIFO bug. We need to do this
7300 		 * once we know we can transmit the packet, since we
7301 		 * do some internal FIFO space accounting here.
7302 		 */
7303 		if (sc->sc_type == WM_T_82547 &&
7304 		    wm_82547_txfifo_bugchk(sc, m0)) {
7305 			DPRINTF(WM_DEBUG_TX,
7306 			    ("%s: TX: 82547 Tx FIFO bug detected\n",
7307 				device_xname(sc->sc_dev)));
7308 			if (!is_transmit)
7309 				ifp->if_flags |= IFF_OACTIVE;
7310 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7311 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7312 			WM_Q_EVCNT_INCR(txq, txfifo_stall);
7313 			break;
7314 		}
7315 
7316 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7317 
7318 		DPRINTF(WM_DEBUG_TX,
7319 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7320 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7321 
7322 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7323 
7324 		/*
7325 		 * Store a pointer to the packet so that we can free it
7326 		 * later.
7327 		 *
7328 		 * Initially, we consider the number of descriptors the
7329 		 * packet uses the number of DMA segments.  This may be
7330 		 * incremented by 1 if we do checksum offload (a descriptor
7331 		 * is used to set the checksum context).
7332 		 */
7333 		txs->txs_mbuf = m0;
7334 		txs->txs_firstdesc = txq->txq_next;
7335 		txs->txs_ndesc = segs_needed;
7336 
7337 		/* Set up offload parameters for this packet. */
7338 		if (m0->m_pkthdr.csum_flags &
7339 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7340 		    M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7341 		    M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7342 			if (wm_tx_offload(sc, txq, txs, &cksumcmd,
7343 					  &cksumfields) != 0) {
7344 				/* Error message already displayed. */
7345 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7346 				continue;
7347 			}
7348 		} else {
7349 			cksumcmd = 0;
7350 			cksumfields = 0;
7351 		}
7352 
7353 		cksumcmd |= WTX_CMD_IDE | WTX_CMD_IFCS;
7354 
7355 		/* Sync the DMA map. */
7356 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7357 		    BUS_DMASYNC_PREWRITE);
7358 
7359 		/* Initialize the transmit descriptor. */
7360 		for (nexttx = txq->txq_next, seg = 0;
7361 		     seg < dmamap->dm_nsegs; seg++) {
7362 			for (seglen = dmamap->dm_segs[seg].ds_len,
7363 			     curaddr = dmamap->dm_segs[seg].ds_addr;
7364 			     seglen != 0;
7365 			     curaddr += curlen, seglen -= curlen,
7366 			     nexttx = WM_NEXTTX(txq, nexttx)) {
7367 				curlen = seglen;
7368 
7369 				/*
7370 				 * So says the Linux driver:
7371 				 * Work around for premature descriptor
7372 				 * write-backs in TSO mode.  Append a
7373 				 * 4-byte sentinel descriptor.
7374 				 */
7375 				if (use_tso && seg == dmamap->dm_nsegs - 1 &&
7376 				    curlen > 8)
7377 					curlen -= 4;
7378 
7379 				wm_set_dma_addr(
7380 				    &txq->txq_descs[nexttx].wtx_addr, curaddr);
7381 				txq->txq_descs[nexttx].wtx_cmdlen
7382 				    = htole32(cksumcmd | curlen);
7383 				txq->txq_descs[nexttx].wtx_fields.wtxu_status
7384 				    = 0;
7385 				txq->txq_descs[nexttx].wtx_fields.wtxu_options
7386 				    = cksumfields;
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
7388 				lasttx = nexttx;
7389 
7390 				DPRINTF(WM_DEBUG_TX,
7391 				    ("%s: TX: desc %d: low %#" PRIx64 ", "
7392 					"len %#04zx\n",
7393 					device_xname(sc->sc_dev), nexttx,
7394 					(uint64_t)curaddr, curlen));
7395 			}
7396 		}
7397 
7398 		KASSERT(lasttx != -1);
7399 
7400 		/*
7401 		 * Set up the command byte on the last descriptor of
7402 		 * the packet. If we're in the interrupt delay window,
7403 		 * delay the interrupt.
7404 		 */
7405 		txq->txq_descs[lasttx].wtx_cmdlen |=
7406 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
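		/*
		 * WTX_CMD_RS requests a status write-back; wm_txeof()
		 * later polls the DD bit on this last descriptor to
		 * detect completion.
		 */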
7407 
7408 		/*
7409 		 * If VLANs are enabled and the packet has a VLAN tag, set
7410 		 * up the descriptor to encapsulate the packet for us.
7411 		 *
7412 		 * This is only valid on the last descriptor of the packet.
7413 		 */
7414 		if (vlan_has_tag(m0)) {
7415 			txq->txq_descs[lasttx].wtx_cmdlen |=
7416 			    htole32(WTX_CMD_VLE);
7417 			txq->txq_descs[lasttx].wtx_fields.wtxu_vlan
7418 			    = htole16(vlan_get_tag(m0));
7419 		}
7420 
7421 		txs->txs_lastdesc = lasttx;
7422 
7423 		DPRINTF(WM_DEBUG_TX,
7424 		    ("%s: TX: desc %d: cmdlen 0x%08x\n",
7425 			device_xname(sc->sc_dev),
7426 			lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7427 
7428 		/* Sync the descriptors we're using. */
7429 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
7430 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
7431 
7432 		/* Give the packet to the chip. */
7433 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
7434 
7435 		DPRINTF(WM_DEBUG_TX,
7436 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
7437 
7438 		DPRINTF(WM_DEBUG_TX,
7439 		    ("%s: TX: finished transmitting packet, job %d\n",
7440 			device_xname(sc->sc_dev), txq->txq_snext));
7441 
7442 		/* Advance the tx pointer. */
7443 		txq->txq_free -= txs->txs_ndesc;
7444 		txq->txq_next = nexttx;
7445 
7446 		txq->txq_sfree--;
7447 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
7448 
7449 		/* Pass the packet to any BPF listeners. */
7450 		bpf_mtap(ifp, m0, BPF_D_OUT);
7451 	}
7452 
7453 	if (m0 != NULL) {
7454 		if (!is_transmit)
7455 			ifp->if_flags |= IFF_OACTIVE;
7456 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7457 		WM_Q_EVCNT_INCR(txq, txdrop);
7458 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
7459 			__func__));
7460 		m_freem(m0);
7461 	}
7462 
7463 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
7464 		/* No more slots; notify upper layer. */
7465 		if (!is_transmit)
7466 			ifp->if_flags |= IFF_OACTIVE;
7467 		txq->txq_flags |= WM_TXQ_NO_SPACE;
7468 	}
7469 
7470 	if (txq->txq_free != ofree) {
7471 		/* Set a watchdog timer in case the chip flakes out. */
7472 		txq->txq_lastsent = time_uptime;
7473 		txq->txq_sending = true;
7474 	}
7475 }
7476 
7477 /*
7478  * wm_nq_tx_offload:
7479  *
7480  *	Set up TCP/IP checksumming parameters for the
7481  *	specified packet, for NEWQUEUE devices
7482  */
7483 static int
7484 wm_nq_tx_offload(struct wm_softc *sc, struct wm_txqueue *txq,
7485     struct wm_txsoft *txs, uint32_t *cmdlenp, uint32_t *fieldsp, bool *do_csum)
7486 {
7487 	struct mbuf *m0 = txs->txs_mbuf;
7488 	uint32_t vl_len, mssidx, cmdc;
7489 	struct ether_header *eh;
7490 	int offset, iphl;
7491 
7492 	/*
7493 	 * XXX It would be nice if the mbuf pkthdr had offset
7494 	 * fields for the protocol headers.
7495 	 */
7496 	*cmdlenp = 0;
7497 	*fieldsp = 0;
7498 
7499 	eh = mtod(m0, struct ether_header *);
7500 	switch (htons(eh->ether_type)) {
7501 	case ETHERTYPE_IP:
7502 	case ETHERTYPE_IPV6:
7503 		offset = ETHER_HDR_LEN;
7504 		break;
7505 
7506 	case ETHERTYPE_VLAN:
7507 		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
7508 		break;
7509 
7510 	default:
7511 		/* Don't support this protocol or encapsulation. */
7512 		*do_csum = false;
7513 		return 0;
7514 	}
7515 	*do_csum = true;
7516 	*cmdlenp = NQTX_DTYP_D | NQTX_CMD_DEXT | NQTX_CMD_IFCS;
7517 	cmdc = NQTX_DTYP_C | NQTX_CMD_DEXT;
7518 
7519 	vl_len = (offset << NQTXC_VLLEN_MACLEN_SHIFT);
7520 	KASSERT((offset & ~NQTXC_VLLEN_MACLEN_MASK) == 0);
7521 
7522 	if ((m0->m_pkthdr.csum_flags &
7523 	    (M_CSUM_TSOv4 | M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_IPv4)) != 0) {
7524 		iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);
7525 	} else {
7526 		iphl = M_CSUM_DATA_IPv6_IPHL(m0->m_pkthdr.csum_data);
7527 	}
7528 	vl_len |= (iphl << NQTXC_VLLEN_IPLEN_SHIFT);
7529 	KASSERT((iphl & ~NQTXC_VLLEN_IPLEN_MASK) == 0);
7530 
7531 	if (vlan_has_tag(m0)) {
7532 		vl_len |= ((vlan_get_tag(m0) & NQTXC_VLLEN_VLAN_MASK)
7533 		    << NQTXC_VLLEN_VLAN_SHIFT);
7534 		*cmdlenp |= NQTX_CMD_VLE;
7535 	}
7536 
7537 	mssidx = 0;
7538 
7539 	if ((m0->m_pkthdr.csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
7540 		int hlen = offset + iphl;
7541 		int tcp_hlen;
7542 		bool v4 = (m0->m_pkthdr.csum_flags & M_CSUM_TSOv4) != 0;
7543 
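		/*
		 * Prime the on-wire headers for TSO just as in
		 * wm_tx_offload(): zero the IP length field and seed
		 * th_sum with the pseudo-header checksum that excludes
		 * the length.
		 */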
7544 		if (__predict_false(m0->m_len <
7545 				    (hlen + sizeof(struct tcphdr)))) {
7546 			/*
7547 			 * TCP/IP headers are not in the first mbuf; we need
7548 			 * to do this the slow and painful way. Let's just
7549 			 * hope this doesn't happen very often.
7550 			 */
7551 			struct tcphdr th;
7552 
7553 			WM_Q_EVCNT_INCR(txq, txtsopain);
7554 
7555 			m_copydata(m0, hlen, sizeof(th), &th);
7556 			if (v4) {
7557 				struct ip ip;
7558 
7559 				m_copydata(m0, offset, sizeof(ip), &ip);
7560 				ip.ip_len = 0;
7561 				m_copyback(m0,
7562 				    offset + offsetof(struct ip, ip_len),
7563 				    sizeof(ip.ip_len), &ip.ip_len);
7564 				th.th_sum = in_cksum_phdr(ip.ip_src.s_addr,
7565 				    ip.ip_dst.s_addr, htons(IPPROTO_TCP));
7566 			} else {
7567 				struct ip6_hdr ip6;
7568 
7569 				m_copydata(m0, offset, sizeof(ip6), &ip6);
7570 				ip6.ip6_plen = 0;
7571 				m_copyback(m0,
7572 				    offset + offsetof(struct ip6_hdr, ip6_plen),
7573 				    sizeof(ip6.ip6_plen), &ip6.ip6_plen);
7574 				th.th_sum = in6_cksum_phdr(&ip6.ip6_src,
7575 				    &ip6.ip6_dst, 0, htonl(IPPROTO_TCP));
7576 			}
7577 			m_copyback(m0, hlen + offsetof(struct tcphdr, th_sum),
7578 			    sizeof(th.th_sum), &th.th_sum);
7579 
7580 			tcp_hlen = th.th_off << 2;
7581 		} else {
7582 			/*
7583 			 * TCP/IP headers are in the first mbuf; we can do
7584 			 * this the easy way.
7585 			 */
7586 			struct tcphdr *th;
7587 
7588 			if (v4) {
7589 				struct ip *ip =
7590 				    (void *)(mtod(m0, char *) + offset);
7591 				th = (void *)(mtod(m0, char *) + hlen);
7592 
7593 				ip->ip_len = 0;
7594 				th->th_sum = in_cksum_phdr(ip->ip_src.s_addr,
7595 				    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
7596 			} else {
7597 				struct ip6_hdr *ip6 =
7598 				    (void *)(mtod(m0, char *) + offset);
7599 				th = (void *)(mtod(m0, char *) + hlen);
7600 
7601 				ip6->ip6_plen = 0;
7602 				th->th_sum = in6_cksum_phdr(&ip6->ip6_src,
7603 				    &ip6->ip6_dst, 0, htonl(IPPROTO_TCP));
7604 			}
7605 			tcp_hlen = th->th_off << 2;
7606 		}
7607 		hlen += tcp_hlen;
7608 		*cmdlenp |= NQTX_CMD_TSE;
7609 
7610 		if (v4) {
7611 			WM_Q_EVCNT_INCR(txq, txtso);
7612 			*fieldsp |= NQTXD_FIELDS_IXSM | NQTXD_FIELDS_TUXSM;
7613 		} else {
7614 			WM_Q_EVCNT_INCR(txq, txtso6);
7615 			*fieldsp |= NQTXD_FIELDS_TUXSM;
7616 		}
		*fieldsp |= ((m0->m_pkthdr.len - hlen)
		    << NQTXD_FIELDS_PAYLEN_SHIFT);
		KASSERT(((m0->m_pkthdr.len - hlen)
			& ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7619 		mssidx |= (m0->m_pkthdr.segsz << NQTXC_MSSIDX_MSS_SHIFT);
7620 		KASSERT((m0->m_pkthdr.segsz & ~NQTXC_MSSIDX_MSS_MASK) == 0);
7621 		mssidx |= (tcp_hlen << NQTXC_MSSIDX_L4LEN_SHIFT);
7622 		KASSERT((tcp_hlen & ~NQTXC_MSSIDX_L4LEN_MASK) == 0);
7623 	} else {
7624 		*fieldsp |= (m0->m_pkthdr.len << NQTXD_FIELDS_PAYLEN_SHIFT);
7625 		KASSERT((m0->m_pkthdr.len & ~NQTXD_FIELDS_PAYLEN_MASK) == 0);
7626 	}
7627 
7628 	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
7629 		*fieldsp |= NQTXD_FIELDS_IXSM;
7630 		cmdc |= NQTXC_CMD_IP4;
7631 	}
7632 
7633 	if (m0->m_pkthdr.csum_flags &
7634 	    (M_CSUM_UDPv4 | M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7635 		WM_Q_EVCNT_INCR(txq, txtusum);
7636 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4 | M_CSUM_TSOv4)) {
7637 			cmdc |= NQTXC_CMD_TCP;
7638 		} else {
7639 			cmdc |= NQTXC_CMD_UDP;
7640 		}
7641 		cmdc |= NQTXC_CMD_IP4;
7642 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7643 	}
7644 	if (m0->m_pkthdr.csum_flags &
7645 	    (M_CSUM_UDPv6 | M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7646 		WM_Q_EVCNT_INCR(txq, txtusum6);
7647 		if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv6 | M_CSUM_TSOv6)) {
7648 			cmdc |= NQTXC_CMD_TCP;
7649 		} else {
7650 			cmdc |= NQTXC_CMD_UDP;
7651 		}
7652 		cmdc |= NQTXC_CMD_IP6;
7653 		*fieldsp |= NQTXD_FIELDS_TUXSM;
7654 	}
7655 
	/*
	 * We don't have to write a context descriptor for every packet on
	 * NEWQUEUE controllers, that is, the 82575, 82576, 82580, I350,
	 * I354, I210 and I211; for these it is enough to write one per
	 * Tx queue. Writing a context descriptor for every packet costs
	 * some overhead, but it does not cause problems.
	 */
7664 	/* Fill in the context descriptor. */
7665 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_vl_len =
7666 	    htole32(vl_len);
7667 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_sn = 0;
7668 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_cmd =
7669 	    htole32(cmdc);
7670 	txq->txq_nq_descs[txq->txq_next].nqrx_ctx.nqtxc_mssidx =
7671 	    htole32(mssidx);
7672 	wm_cdtxsync(txq, txq->txq_next, 1, BUS_DMASYNC_PREWRITE);
7673 	DPRINTF(WM_DEBUG_TX,
7674 	    ("%s: TX: context desc %d 0x%08x%08x\n", device_xname(sc->sc_dev),
7675 		txq->txq_next, 0, vl_len));
7676 	DPRINTF(WM_DEBUG_TX, ("\t0x%08x%08x\n", mssidx, cmdc));
7677 	txq->txq_next = WM_NEXTTX(txq, txq->txq_next);
7678 	txs->txs_ndesc++;
7679 	return 0;
7680 }
7681 
7682 /*
7683  * wm_nq_start:		[ifnet interface function]
7684  *
7685  *	Start packet transmission on the interface for NEWQUEUE devices
7686  */
7687 static void
7688 wm_nq_start(struct ifnet *ifp)
7689 {
7690 	struct wm_softc *sc = ifp->if_softc;
7691 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7692 
7693 #ifdef WM_MPSAFE
7694 	KASSERT(if_is_mpsafe(ifp));
7695 #endif
7696 	/*
7697 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
7698 	 */
7699 
7700 	mutex_enter(txq->txq_lock);
7701 	if (!txq->txq_stopping)
7702 		wm_nq_start_locked(ifp);
7703 	mutex_exit(txq->txq_lock);
7704 }
7705 
7706 static void
7707 wm_nq_start_locked(struct ifnet *ifp)
7708 {
7709 	struct wm_softc *sc = ifp->if_softc;
7710 	struct wm_txqueue *txq = &sc->sc_queue[0].wmq_txq;
7711 
7712 	wm_nq_send_common_locked(ifp, txq, false);
7713 }
7714 
7715 static int
7716 wm_nq_transmit(struct ifnet *ifp, struct mbuf *m)
7717 {
7718 	int qid;
7719 	struct wm_softc *sc = ifp->if_softc;
7720 	struct wm_txqueue *txq;
7721 
7722 	qid = wm_select_txqueue(ifp, m);
7723 	txq = &sc->sc_queue[qid].wmq_txq;
7724 
7725 	if (__predict_false(!pcq_put(txq->txq_interq, m))) {
7726 		m_freem(m);
7727 		WM_Q_EVCNT_INCR(txq, txdrop);
7728 		return ENOBUFS;
7729 	}
7730 
	/* XXX NOMPSAFE: ifp->if_data should be percpu. */
7734 	ifp->if_obytes += m->m_pkthdr.len;
7735 	if (m->m_flags & M_MCAST)
7736 		ifp->if_omcasts++;
7737 
	/*
	 * This mutex_tryenter() can fail at run time in two cases:
	 *     (1) contention with the interrupt handler
	 *         (wm_txrxintr_msix())
	 *     (2) contention with the deferred if_start softint
	 *         (wm_handle_queue())
	 * In case (1), the last packet enqueued to txq->txq_interq is
	 * dequeued by wm_deferred_start_locked(), so it does not get stuck.
	 * In case (2), the last packet enqueued to txq->txq_interq is
	 * likewise dequeued by wm_deferred_start_locked(), so it does not
	 * get stuck either.
	 */
7749 	if (mutex_tryenter(txq->txq_lock)) {
7750 		if (!txq->txq_stopping)
7751 			wm_nq_transmit_locked(ifp, txq);
7752 		mutex_exit(txq->txq_lock);
7753 	}
7754 
7755 	return 0;
7756 }
7757 
7758 static void
7759 wm_nq_transmit_locked(struct ifnet *ifp, struct wm_txqueue *txq)
7760 {
7761 
7762 	wm_nq_send_common_locked(ifp, txq, true);
7763 }
7764 
7765 static void
7766 wm_nq_send_common_locked(struct ifnet *ifp, struct wm_txqueue *txq,
7767     bool is_transmit)
7768 {
7769 	struct wm_softc *sc = ifp->if_softc;
7770 	struct mbuf *m0;
7771 	struct wm_txsoft *txs;
7772 	bus_dmamap_t dmamap;
7773 	int error, nexttx, lasttx = -1, seg, segs_needed;
7774 	bool do_csum, sent;
7775 
7776 	KASSERT(mutex_owned(txq->txq_lock));
7777 
7778 	if ((ifp->if_flags & IFF_RUNNING) == 0)
7779 		return;
7780 	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
7781 		return;
7782 	if ((txq->txq_flags & WM_TXQ_NO_SPACE) != 0)
7783 		return;
7784 
7785 	sent = false;
7786 
7787 	/*
7788 	 * Loop through the send queue, setting up transmit descriptors
7789 	 * until we drain the queue, or use up all available transmit
7790 	 * descriptors.
7791 	 */
7792 	for (;;) {
7793 		m0 = NULL;
7794 
7795 		/* Get a work queue entry. */
7796 		if (txq->txq_sfree < WM_TXQUEUE_GC(txq)) {
7797 			wm_txeof(txq, UINT_MAX);
7798 			if (txq->txq_sfree == 0) {
7799 				DPRINTF(WM_DEBUG_TX,
7800 				    ("%s: TX: no free job descriptors\n",
7801 					device_xname(sc->sc_dev)));
7802 				WM_Q_EVCNT_INCR(txq, txsstall);
7803 				break;
7804 			}
7805 		}
7806 
7807 		/* Grab a packet off the queue. */
7808 		if (is_transmit)
7809 			m0 = pcq_get(txq->txq_interq);
7810 		else
7811 			IFQ_DEQUEUE(&ifp->if_snd, m0);
7812 		if (m0 == NULL)
7813 			break;
7814 
7815 		DPRINTF(WM_DEBUG_TX,
7816 		    ("%s: TX: have packet to transmit: %p\n",
7817 		    device_xname(sc->sc_dev), m0));
7818 
7819 		txs = &txq->txq_soft[txq->txq_snext];
7820 		dmamap = txs->txs_dmamap;
7821 
7822 		/*
7823 		 * Load the DMA map.  If this fails, the packet either
7824 		 * didn't fit in the allotted number of segments, or we
7825 		 * were short on resources.  For the too-many-segments
7826 		 * case, we simply report an error and drop the packet,
7827 		 * since we can't sanely copy a jumbo packet to a single
7828 		 * buffer.
7829 		 */
7830 		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
7831 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
7832 		if (error) {
7833 			if (error == EFBIG) {
7834 				WM_Q_EVCNT_INCR(txq, txdrop);
7835 				log(LOG_ERR, "%s: Tx packet consumes too many "
7836 				    "DMA segments, dropping...\n",
7837 				    device_xname(sc->sc_dev));
7838 				wm_dump_mbuf_chain(sc, m0);
7839 				m_freem(m0);
7840 				continue;
7841 			}
7842 			/* Short on resources, just stop for now. */
7843 			DPRINTF(WM_DEBUG_TX,
7844 			    ("%s: TX: dmamap load failed: %d\n",
7845 				device_xname(sc->sc_dev), error));
7846 			break;
7847 		}
7848 
7849 		segs_needed = dmamap->dm_nsegs;
7850 
7851 		/*
7852 		 * Ensure we have enough descriptors free to describe
7853 		 * the packet. Note, we always reserve one descriptor
7854 		 * at the end of the ring due to the semantics of the
7855 		 * TDT register, plus one more in the event we need
7856 		 * to load offload context.
7857 		 */
7858 		if (segs_needed > txq->txq_free - 2) {
7859 			/*
7860 			 * Not enough free descriptors to transmit this
7861 			 * packet.  We haven't committed anything yet,
7862 			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt. Notify the upper
7864 			 * layer that there are no more slots left.
7865 			 */
7866 			DPRINTF(WM_DEBUG_TX,
7867 			    ("%s: TX: need %d (%d) descriptors, have %d\n",
7868 				device_xname(sc->sc_dev), dmamap->dm_nsegs,
7869 				segs_needed, txq->txq_free - 1));
7870 			if (!is_transmit)
7871 				ifp->if_flags |= IFF_OACTIVE;
7872 			txq->txq_flags |= WM_TXQ_NO_SPACE;
7873 			bus_dmamap_unload(sc->sc_dmat, dmamap);
7874 			WM_Q_EVCNT_INCR(txq, txdstall);
7875 			break;
7876 		}
7877 
7878 		/* WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */
7879 
7880 		DPRINTF(WM_DEBUG_TX,
7881 		    ("%s: TX: packet has %d (%d) DMA segments\n",
7882 		    device_xname(sc->sc_dev), dmamap->dm_nsegs, segs_needed));
7883 
7884 		WM_EVCNT_INCR(&txq->txq_ev_txseg[dmamap->dm_nsegs - 1]);
7885 
7886 		/*
7887 		 * Store a pointer to the packet so that we can free it
7888 		 * later.
7889 		 *
7890 		 * Initially, we consider the number of descriptors the
7891 		 * packet uses the number of DMA segments.  This may be
7892 		 * incremented by 1 if we do checksum offload (a descriptor
7893 		 * is used to set the checksum context).
7894 		 */
7895 		txs->txs_mbuf = m0;
7896 		txs->txs_firstdesc = txq->txq_next;
7897 		txs->txs_ndesc = segs_needed;
7898 
7899 		/* Set up offload parameters for this packet. */
7900 		uint32_t cmdlen, fields, dcmdlen;
7901 		if (m0->m_pkthdr.csum_flags &
7902 		    (M_CSUM_TSOv4 | M_CSUM_TSOv6 |
7903 			M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4 |
7904 			M_CSUM_TCPv6 | M_CSUM_UDPv6)) {
7905 			if (wm_nq_tx_offload(sc, txq, txs, &cmdlen, &fields,
7906 			    &do_csum) != 0) {
7907 				/* Error message already displayed. */
7908 				bus_dmamap_unload(sc->sc_dmat, dmamap);
7909 				continue;
7910 			}
7911 		} else {
7912 			do_csum = false;
7913 			cmdlen = 0;
7914 			fields = 0;
7915 		}
7916 
7917 		/* Sync the DMA map. */
7918 		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
7919 		    BUS_DMASYNC_PREWRITE);
7920 
7921 		/* Initialize the first transmit descriptor. */
7922 		nexttx = txq->txq_next;
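		/*
		 * Packets that need no offload context use a legacy
		 * descriptor; otherwise an advanced data descriptor is
		 * used so that the offload fields from the context
		 * descriptor written by wm_nq_tx_offload() are applied.
		 */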
7923 		if (!do_csum) {
			/* Set up a legacy descriptor. */
7925 			wm_set_dma_addr(&txq->txq_descs[nexttx].wtx_addr,
7926 			    dmamap->dm_segs[0].ds_addr);
7927 			txq->txq_descs[nexttx].wtx_cmdlen =
7928 			    htole32(WTX_CMD_IFCS | dmamap->dm_segs[0].ds_len);
7929 			txq->txq_descs[nexttx].wtx_fields.wtxu_status = 0;
7930 			txq->txq_descs[nexttx].wtx_fields.wtxu_options = 0;
7931 			if (vlan_has_tag(m0)) {
7932 				txq->txq_descs[nexttx].wtx_cmdlen |=
7933 				    htole32(WTX_CMD_VLE);
7934 				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan =
7935 				    htole16(vlan_get_tag(m0));
7936 			} else {
				txq->txq_descs[nexttx].wtx_fields.wtxu_vlan
				    = 0;
7938 			}
7939 			dcmdlen = 0;
7940 		} else {
			/* Set up an advanced data descriptor. */
7942 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7943 			    htole64(dmamap->dm_segs[0].ds_addr);
7944 			KASSERT((dmamap->dm_segs[0].ds_len & cmdlen) == 0);
7945 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
			    htole32(dmamap->dm_segs[0].ds_len | cmdlen);
7947 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields =
7948 			    htole32(fields);
7949 			DPRINTF(WM_DEBUG_TX,
7950 			    ("%s: TX: adv data desc %d 0x%" PRIx64 "\n",
7951 				device_xname(sc->sc_dev), nexttx,
7952 				(uint64_t)dmamap->dm_segs[0].ds_addr));
7953 			DPRINTF(WM_DEBUG_TX,
7954 			    ("\t 0x%08x%08x\n", fields,
7955 				(uint32_t)dmamap->dm_segs[0].ds_len | cmdlen));
7956 			dcmdlen = NQTX_DTYP_D | NQTX_CMD_DEXT;
7957 		}
7958 
7959 		lasttx = nexttx;
7960 		nexttx = WM_NEXTTX(txq, nexttx);
		/*
		 * Fill in the next descriptors; the handling is the same
		 * for the legacy and advanced formats from here on.
		 */
7965 		for (seg = 1; seg < dmamap->dm_nsegs;
7966 		     seg++, nexttx = WM_NEXTTX(txq, nexttx)) {
7967 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_addr =
7968 			    htole64(dmamap->dm_segs[seg].ds_addr);
7969 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_cmdlen =
7970 			    htole32(dcmdlen | dmamap->dm_segs[seg].ds_len);
7971 			KASSERT((dcmdlen & dmamap->dm_segs[seg].ds_len) == 0);
7972 			txq->txq_nq_descs[nexttx].nqtx_data.nqtxd_fields = 0;
7973 			lasttx = nexttx;
7974 
7975 			DPRINTF(WM_DEBUG_TX,
7976 			    ("%s: TX: desc %d: %#" PRIx64 ", len %#04zx\n",
7977 				device_xname(sc->sc_dev), nexttx,
7978 				(uint64_t)dmamap->dm_segs[seg].ds_addr,
7979 				dmamap->dm_segs[seg].ds_len));
7980 		}
7981 
7982 		KASSERT(lasttx != -1);
7983 
7984 		/*
7985 		 * Set up the command byte on the last descriptor of
7986 		 * the packet. If we're in the interrupt delay window,
7987 		 * delay the interrupt.
7988 		 */
7989 		KASSERT((WTX_CMD_EOP | WTX_CMD_RS) ==
7990 		    (NQTX_CMD_EOP | NQTX_CMD_RS));
7991 		txq->txq_descs[lasttx].wtx_cmdlen |=
7992 		    htole32(WTX_CMD_EOP | WTX_CMD_RS);
7993 
7994 		txs->txs_lastdesc = lasttx;
7995 
7996 		DPRINTF(WM_DEBUG_TX, ("%s: TX: desc %d: cmdlen 0x%08x\n",
7997 		    device_xname(sc->sc_dev),
7998 		    lasttx, le32toh(txq->txq_descs[lasttx].wtx_cmdlen)));
7999 
8000 		/* Sync the descriptors we're using. */
8001 		wm_cdtxsync(txq, txq->txq_next, txs->txs_ndesc,
8002 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
8003 
8004 		/* Give the packet to the chip. */
8005 		CSR_WRITE(sc, txq->txq_tdt_reg, nexttx);
8006 		sent = true;
8007 
8008 		DPRINTF(WM_DEBUG_TX,
8009 		    ("%s: TX: TDT -> %d\n", device_xname(sc->sc_dev), nexttx));
8010 
8011 		DPRINTF(WM_DEBUG_TX,
8012 		    ("%s: TX: finished transmitting packet, job %d\n",
8013 			device_xname(sc->sc_dev), txq->txq_snext));
8014 
8015 		/* Advance the tx pointer. */
8016 		txq->txq_free -= txs->txs_ndesc;
8017 		txq->txq_next = nexttx;
8018 
8019 		txq->txq_sfree--;
8020 		txq->txq_snext = WM_NEXTTXS(txq, txq->txq_snext);
8021 
8022 		/* Pass the packet to any BPF listeners. */
8023 		bpf_mtap(ifp, m0, BPF_D_OUT);
8024 	}
8025 
8026 	if (m0 != NULL) {
8027 		if (!is_transmit)
8028 			ifp->if_flags |= IFF_OACTIVE;
8029 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8030 		WM_Q_EVCNT_INCR(txq, txdrop);
8031 		DPRINTF(WM_DEBUG_TX, ("%s: TX: error after IFQ_DEQUEUE\n",
8032 			__func__));
8033 		m_freem(m0);
8034 	}
8035 
8036 	if (txq->txq_sfree == 0 || txq->txq_free <= 2) {
8037 		/* No more slots; notify upper layer. */
8038 		if (!is_transmit)
8039 			ifp->if_flags |= IFF_OACTIVE;
8040 		txq->txq_flags |= WM_TXQ_NO_SPACE;
8041 	}
8042 
8043 	if (sent) {
8044 		/* Set a watchdog timer in case the chip flakes out. */
8045 		txq->txq_lastsent = time_uptime;
8046 		txq->txq_sending = true;
8047 	}
8048 }
8049 
8050 static void
8051 wm_deferred_start_locked(struct wm_txqueue *txq)
8052 {
8053 	struct wm_softc *sc = txq->txq_sc;
8054 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8055 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8056 	int qid = wmq->wmq_id;
8057 
8058 	KASSERT(mutex_owned(txq->txq_lock));
8059 
8060 	if (txq->txq_stopping) {
8061 		mutex_exit(txq->txq_lock);
8062 		return;
8063 	}
8064 
8065 	if ((sc->sc_flags & WM_F_NEWQUEUE) != 0) {
		/* XXX Needed for ALTQ and single-CPU systems */
8067 		if (qid == 0)
8068 			wm_nq_start_locked(ifp);
8069 		wm_nq_transmit_locked(ifp, txq);
8070 	} else {
		/* XXX Needed for ALTQ and single-CPU systems */
8072 		if (qid == 0)
8073 			wm_start_locked(ifp);
8074 		wm_transmit_locked(ifp, txq);
8075 	}
8076 }
8077 
8078 /* Interrupt */
8079 
8080 /*
8081  * wm_txeof:
8082  *
8083  *	Helper; handle transmit interrupts.
8084  */
8085 static bool
8086 wm_txeof(struct wm_txqueue *txq, u_int limit)
8087 {
8088 	struct wm_softc *sc = txq->txq_sc;
8089 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8090 	struct wm_txsoft *txs;
8091 	int count = 0;
8092 	int i;
8093 	uint8_t status;
8094 	struct wm_queue *wmq = container_of(txq, struct wm_queue, wmq_txq);
8095 	bool more = false;
8096 
8097 	KASSERT(mutex_owned(txq->txq_lock));
8098 
8099 	if (txq->txq_stopping)
8100 		return false;
8101 
8102 	txq->txq_flags &= ~WM_TXQ_NO_SPACE;
	/* For ALTQ and legacy (non-multiqueue) ethernet controllers */
8104 	if (wmq->wmq_id == 0)
8105 		ifp->if_flags &= ~IFF_OACTIVE;
8106 
8107 	/*
8108 	 * Go through the Tx list and free mbufs for those
8109 	 * frames which have been transmitted.
8110 	 */
8111 	for (i = txq->txq_sdirty; txq->txq_sfree != WM_TXQUEUELEN(txq);
8112 	     i = WM_NEXTTXS(txq, i), txq->txq_sfree++) {
8113 		if (limit-- == 0) {
8114 			more = true;
8115 			DPRINTF(WM_DEBUG_TX,
8116 			    ("%s: TX: loop limited, job %d is not processed\n",
8117 				device_xname(sc->sc_dev), i));
8118 			break;
8119 		}
8120 
8121 		txs = &txq->txq_soft[i];
8122 
8123 		DPRINTF(WM_DEBUG_TX, ("%s: TX: checking job %d\n",
8124 			device_xname(sc->sc_dev), i));
8125 
8126 		wm_cdtxsync(txq, txs->txs_firstdesc, txs->txs_ndesc,
8127 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8128 
8129 		status =
8130 		    txq->txq_descs[txs->txs_lastdesc].wtx_fields.wtxu_status;
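		/*
		 * If DD (descriptor done) is not yet set, this job is
		 * still in flight: re-sync the descriptor for a later
		 * read and stop scanning.
		 */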
8131 		if ((status & WTX_ST_DD) == 0) {
8132 			wm_cdtxsync(txq, txs->txs_lastdesc, 1,
8133 			    BUS_DMASYNC_PREREAD);
8134 			break;
8135 		}
8136 
8137 		count++;
8138 		DPRINTF(WM_DEBUG_TX,
8139 		    ("%s: TX: job %d done: descs %d..%d\n",
8140 		    device_xname(sc->sc_dev), i, txs->txs_firstdesc,
8141 		    txs->txs_lastdesc));
8142 
8143 		/*
8144 		 * XXX We should probably be using the statistics
8145 		 * XXX registers, but I don't know if they exist
8146 		 * XXX on chips before the i82544.
8147 		 */
8148 
8149 #ifdef WM_EVENT_COUNTERS
8150 		if (status & WTX_ST_TU)
8151 			WM_Q_EVCNT_INCR(txq, tu);
8152 #endif /* WM_EVENT_COUNTERS */
8153 
8154 		if (status & (WTX_ST_EC | WTX_ST_LC)) {
8155 			ifp->if_oerrors++;
8156 			if (status & WTX_ST_LC)
8157 				log(LOG_WARNING, "%s: late collision\n",
8158 				    device_xname(sc->sc_dev));
8159 			else if (status & WTX_ST_EC) {
8160 				ifp->if_collisions += 16;
8161 				log(LOG_WARNING, "%s: excessive collisions\n",
8162 				    device_xname(sc->sc_dev));
8163 			}
8164 		} else
8165 			ifp->if_opackets++;
8166 
8167 		txq->txq_packets++;
8168 		txq->txq_bytes += txs->txs_mbuf->m_pkthdr.len;
8169 
8170 		txq->txq_free += txs->txs_ndesc;
8171 		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
8172 		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
8173 		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
8174 		m_freem(txs->txs_mbuf);
8175 		txs->txs_mbuf = NULL;
8176 	}
8177 
8178 	/* Update the dirty transmit buffer pointer. */
8179 	txq->txq_sdirty = i;
8180 	DPRINTF(WM_DEBUG_TX,
8181 	    ("%s: TX: txsdirty -> %d\n", device_xname(sc->sc_dev), i));
8182 
8183 	if (count != 0)
8184 		rnd_add_uint32(&sc->rnd_source, count);
8185 
8186 	/*
8187 	 * If there are no more pending transmissions, cancel the watchdog
8188 	 * timer.
8189 	 */
8190 	if (txq->txq_sfree == WM_TXQUEUELEN(txq))
8191 		txq->txq_sending = false;
8192 
8193 	return more;
8194 }
8195 
8196 static inline uint32_t
8197 wm_rxdesc_get_status(struct wm_rxqueue *rxq, int idx)
8198 {
8199 	struct wm_softc *sc = rxq->rxq_sc;
8200 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_STATUS(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_STATUS(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
	else
		return rxq->rxq_descs[idx].wrx_status;
8207 }
8208 
8209 static inline uint32_t
8210 wm_rxdesc_get_errors(struct wm_rxqueue *rxq, int idx)
8211 {
8212 	struct wm_softc *sc = rxq->rxq_sc;
8213 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_ERROR(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_err_stat);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_ERROR(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_err_stat);
	else
		return rxq->rxq_descs[idx].wrx_errors;
8220 }
8221 
8222 static inline uint16_t
8223 wm_rxdesc_get_vlantag(struct wm_rxqueue *rxq, int idx)
8224 {
8225 	struct wm_softc *sc = rxq->rxq_sc;
8226 
8227 	if (sc->sc_type == WM_T_82574)
8228 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_vlan;
8229 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8230 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_vlan;
8231 	else
8232 		return rxq->rxq_descs[idx].wrx_special;
8233 }
8234 
8235 static inline int
8236 wm_rxdesc_get_pktlen(struct wm_rxqueue *rxq, int idx)
8237 {
8238 	struct wm_softc *sc = rxq->rxq_sc;
8239 
8240 	if (sc->sc_type == WM_T_82574)
8241 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_pktlen;
8242 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8243 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_pktlen;
8244 	else
8245 		return rxq->rxq_descs[idx].wrx_len;
8246 }
8247 
8248 #ifdef WM_DEBUG
8249 static inline uint32_t
8250 wm_rxdesc_get_rsshash(struct wm_rxqueue *rxq, int idx)
8251 {
8252 	struct wm_softc *sc = rxq->rxq_sc;
8253 
8254 	if (sc->sc_type == WM_T_82574)
8255 		return rxq->rxq_ext_descs[idx].erx_ctx.erxc_rsshash;
8256 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8257 		return rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_rsshash;
8258 	else
8259 		return 0;
8260 }
8261 
8262 static inline uint8_t
8263 wm_rxdesc_get_rsstype(struct wm_rxqueue *rxq, int idx)
8264 {
8265 	struct wm_softc *sc = rxq->rxq_sc;
8266 
	if (sc->sc_type == WM_T_82574)
		return EXTRXC_RSS_TYPE(
		    rxq->rxq_ext_descs[idx].erx_ctx.erxc_mrq);
	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
		return NQRXC_RSS_TYPE(
		    rxq->rxq_nq_descs[idx].nqrx_ctx.nrxc_misc);
	else
		return 0;
8273 }
8274 #endif /* WM_DEBUG */
8275 
8276 static inline bool
8277 wm_rxdesc_is_set_status(struct wm_softc *sc, uint32_t status,
8278     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8279 {
8280 
8281 	if (sc->sc_type == WM_T_82574)
8282 		return (status & ext_bit) != 0;
8283 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8284 		return (status & nq_bit) != 0;
8285 	else
8286 		return (status & legacy_bit) != 0;
8287 }
8288 
8289 static inline bool
8290 wm_rxdesc_is_set_error(struct wm_softc *sc, uint32_t error,
8291     uint32_t legacy_bit, uint32_t ext_bit, uint32_t nq_bit)
8292 {
8293 
8294 	if (sc->sc_type == WM_T_82574)
8295 		return (error & ext_bit) != 0;
8296 	else if ((sc->sc_flags & WM_F_NEWQUEUE) != 0)
8297 		return (error & nq_bit) != 0;
8298 	else
8299 		return (error & legacy_bit) != 0;
8300 }
8301 
8302 static inline bool
8303 wm_rxdesc_is_eop(struct wm_rxqueue *rxq, uint32_t status)
8304 {
8305 
8306 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8307 		WRX_ST_EOP, EXTRXC_STATUS_EOP, NQRXC_STATUS_EOP))
8308 		return true;
8309 	else
8310 		return false;
8311 }
8312 
8313 static inline bool
8314 wm_rxdesc_has_errors(struct wm_rxqueue *rxq, uint32_t errors)
8315 {
8316 	struct wm_softc *sc = rxq->rxq_sc;
8317 
	/* XXX Is an error bit missing for newqueue? */
8319 	if (wm_rxdesc_is_set_error(sc, errors,
8320 		WRX_ER_CE | WRX_ER_SE | WRX_ER_SEQ | WRX_ER_CXE | WRX_ER_RXE,
8321 		EXTRXC_ERROR_CE | EXTRXC_ERROR_SE | EXTRXC_ERROR_SEQ
8322 		| EXTRXC_ERROR_CXE | EXTRXC_ERROR_RXE,
8323 		NQRXC_ERROR_RXE)) {
8324 		if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SE,
8325 		    EXTRXC_ERROR_SE, 0))
8326 			log(LOG_WARNING, "%s: symbol error\n",
8327 			    device_xname(sc->sc_dev));
8328 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_SEQ,
8329 		    EXTRXC_ERROR_SEQ, 0))
8330 			log(LOG_WARNING, "%s: receive sequence error\n",
8331 			    device_xname(sc->sc_dev));
8332 		else if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_CE,
8333 		    EXTRXC_ERROR_CE, 0))
8334 			log(LOG_WARNING, "%s: CRC error\n",
8335 			    device_xname(sc->sc_dev));
8336 		return true;
8337 	}
8338 
8339 	return false;
8340 }
8341 
8342 static inline bool
8343 wm_rxdesc_dd(struct wm_rxqueue *rxq, int idx, uint32_t status)
8344 {
8345 	struct wm_softc *sc = rxq->rxq_sc;
8346 
8347 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_DD, EXTRXC_STATUS_DD,
8348 		NQRXC_STATUS_DD)) {
8349 		/* We have processed all of the receive descriptors. */
8350 		wm_cdrxsync(rxq, idx, BUS_DMASYNC_PREREAD);
8351 		return false;
8352 	}
8353 
8354 	return true;
8355 }
8356 
8357 static inline bool
8358 wm_rxdesc_input_vlantag(struct wm_rxqueue *rxq, uint32_t status,
8359     uint16_t vlantag, struct mbuf *m)
8360 {
8361 
8362 	if (wm_rxdesc_is_set_status(rxq->rxq_sc, status,
8363 		WRX_ST_VP, EXTRXC_STATUS_VP, NQRXC_STATUS_VP)) {
8364 		vlan_set_tag(m, le16toh(vlantag));
8365 	}
8366 
8367 	return true;
8368 }
8369 
8370 static inline void
8371 wm_rxdesc_ensure_checksum(struct wm_rxqueue *rxq, uint32_t status,
8372     uint32_t errors, struct mbuf *m)
8373 {
8374 	struct wm_softc *sc = rxq->rxq_sc;
8375 
8376 	if (!wm_rxdesc_is_set_status(sc, status, WRX_ST_IXSM, 0, 0)) {
8377 		if (wm_rxdesc_is_set_status(sc, status,
8378 			WRX_ST_IPCS, EXTRXC_STATUS_IPCS, NQRXC_STATUS_IPCS)) {
8379 			WM_Q_EVCNT_INCR(rxq, rxipsum);
8380 			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
8381 			if (wm_rxdesc_is_set_error(sc, errors,
8382 				WRX_ER_IPE, EXTRXC_ERROR_IPE, NQRXC_ERROR_IPE))
8383 				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
8384 		}
8385 		if (wm_rxdesc_is_set_status(sc, status,
8386 			WRX_ST_TCPCS, EXTRXC_STATUS_TCPCS, NQRXC_STATUS_L4I)) {
8387 			/*
8388 			 * Note: we don't know if this was TCP or UDP,
8389 			 * so we just set both bits, and expect the
8390 			 * upper layers to deal.
8391 			 */
8392 			WM_Q_EVCNT_INCR(rxq, rxtusum);
8393 			m->m_pkthdr.csum_flags |=
8394 			    M_CSUM_TCPv4 | M_CSUM_UDPv4 |
8395 			    M_CSUM_TCPv6 | M_CSUM_UDPv6;
8396 			if (wm_rxdesc_is_set_error(sc, errors, WRX_ER_TCPE,
8397 			    EXTRXC_ERROR_TCPE, NQRXC_ERROR_L4E))
8398 				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
8399 		}
8400 	}
8401 }
8402 
8403 /*
8404  * wm_rxeof:
8405  *
8406  *	Helper; handle receive interrupts.
8407  */
8408 static bool
8409 wm_rxeof(struct wm_rxqueue *rxq, u_int limit)
8410 {
8411 	struct wm_softc *sc = rxq->rxq_sc;
8412 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8413 	struct wm_rxsoft *rxs;
8414 	struct mbuf *m;
8415 	int i, len;
8416 	int count = 0;
8417 	uint32_t status, errors;
8418 	uint16_t vlantag;
8419 	bool more = false;
8420 
8421 	KASSERT(mutex_owned(rxq->rxq_lock));
8422 
8423 	for (i = rxq->rxq_ptr;; i = WM_NEXTRX(i)) {
8424 		if (limit-- == 0) {
8425 			rxq->rxq_ptr = i;
8426 			more = true;
8427 			DPRINTF(WM_DEBUG_RX,
			    ("%s: RX: loop limited, descriptor %d is not "
				"processed\n", device_xname(sc->sc_dev), i));
8430 			break;
8431 		}
8432 
8433 		rxs = &rxq->rxq_soft[i];
8434 
8435 		DPRINTF(WM_DEBUG_RX,
8436 		    ("%s: RX: checking descriptor %d\n",
8437 			device_xname(sc->sc_dev), i));
8438 		wm_cdrxsync(rxq, i,
8439 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
8440 
8441 		status = wm_rxdesc_get_status(rxq, i);
8442 		errors = wm_rxdesc_get_errors(rxq, i);
8443 		len = le16toh(wm_rxdesc_get_pktlen(rxq, i));
8444 		vlantag = wm_rxdesc_get_vlantag(rxq, i);
8445 #ifdef WM_DEBUG
8446 		uint32_t rsshash = le32toh(wm_rxdesc_get_rsshash(rxq, i));
8447 		uint8_t rsstype = wm_rxdesc_get_rsstype(rxq, i);
8448 #endif
8449 
8450 		if (!wm_rxdesc_dd(rxq, i, status)) {
			/*
			 * Update the receive pointer while holding
			 * rxq_lock, so that it stays consistent with
			 * the packet counters.
			 */
8455 			rxq->rxq_ptr = i;
8456 			break;
8457 		}
8458 
8459 		count++;
8460 		if (__predict_false(rxq->rxq_discard)) {
8461 			DPRINTF(WM_DEBUG_RX,
8462 			    ("%s: RX: discarding contents of descriptor %d\n",
8463 				device_xname(sc->sc_dev), i));
8464 			wm_init_rxdesc(rxq, i);
8465 			if (wm_rxdesc_is_eop(rxq, status)) {
8466 				/* Reset our state. */
8467 				DPRINTF(WM_DEBUG_RX,
8468 				    ("%s: RX: resetting rxdiscard -> 0\n",
8469 					device_xname(sc->sc_dev)));
8470 				rxq->rxq_discard = 0;
8471 			}
8472 			continue;
8473 		}
8474 
8475 		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8476 		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);
8477 
8478 		m = rxs->rxs_mbuf;
8479 
8480 		/*
8481 		 * Add a new receive buffer to the ring, unless of
8482 		 * course the length is zero. Treat the latter as a
8483 		 * failed mapping.
8484 		 */
8485 		if ((len == 0) || (wm_add_rxbuf(rxq, i) != 0)) {
8486 			/*
8487 			 * Failed, throw away what we've done so
8488 			 * far, and discard the rest of the packet.
8489 			 */
8490 			ifp->if_ierrors++;
8491 			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
8492 			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
8493 			wm_init_rxdesc(rxq, i);
8494 			if (!wm_rxdesc_is_eop(rxq, status))
8495 				rxq->rxq_discard = 1;
8496 			if (rxq->rxq_head != NULL)
8497 				m_freem(rxq->rxq_head);
8498 			WM_RXCHAIN_RESET(rxq);
8499 			DPRINTF(WM_DEBUG_RX,
8500 			    ("%s: RX: Rx buffer allocation failed, "
8501 			    "dropping packet%s\n", device_xname(sc->sc_dev),
8502 				rxq->rxq_discard ? " (discard)" : ""));
8503 			continue;
8504 		}
8505 
8506 		m->m_len = len;
8507 		rxq->rxq_len += len;
8508 		DPRINTF(WM_DEBUG_RX,
8509 		    ("%s: RX: buffer at %p len %d\n",
8510 			device_xname(sc->sc_dev), m->m_data, len));
8511 
8512 		/* If this is not the end of the packet, keep looking. */
8513 		if (!wm_rxdesc_is_eop(rxq, status)) {
8514 			WM_RXCHAIN_LINK(rxq, m);
8515 			DPRINTF(WM_DEBUG_RX,
8516 			    ("%s: RX: not yet EOP, rxlen -> %d\n",
8517 				device_xname(sc->sc_dev), rxq->rxq_len));
8518 			continue;
8519 		}
8520 
		/*
		 * Okay, we have the entire packet now. The chip is
		 * configured to include the FCS except on the I350,
		 * I354, I210 and I211 (not all chips can be configured
		 * to strip it), so we normally need to trim it.
		 * We may need to adjust the length of the previous mbuf
		 * in the chain if the current mbuf is too short.
		 * Due to an erratum, the RCTL_SECRC bit in the RCTL
		 * register is always set on the I350, so we don't trim
		 * the FCS there.
		 */
8531 		if ((sc->sc_type != WM_T_I350) && (sc->sc_type != WM_T_I354)
8532 		    && (sc->sc_type != WM_T_I210)
8533 		    && (sc->sc_type != WM_T_I211)) {
8534 			if (m->m_len < ETHER_CRC_LEN) {
8535 				rxq->rxq_tail->m_len
8536 				    -= (ETHER_CRC_LEN - m->m_len);
8537 				m->m_len = 0;
8538 			} else
8539 				m->m_len -= ETHER_CRC_LEN;
8540 			len = rxq->rxq_len - ETHER_CRC_LEN;
8541 		} else
8542 			len = rxq->rxq_len;
8543 
8544 		WM_RXCHAIN_LINK(rxq, m);
8545 
8546 		*rxq->rxq_tailp = NULL;
8547 		m = rxq->rxq_head;
8548 
8549 		WM_RXCHAIN_RESET(rxq);
8550 
8551 		DPRINTF(WM_DEBUG_RX,
8552 		    ("%s: RX: have entire packet, len -> %d\n",
8553 			device_xname(sc->sc_dev), len));
8554 
8555 		/* If an error occurred, update stats and drop the packet. */
8556 		if (wm_rxdesc_has_errors(rxq, errors)) {
8557 			m_freem(m);
8558 			continue;
8559 		}
8560 
8561 		/* No errors.  Receive the packet. */
8562 		m_set_rcvif(m, ifp);
8563 		m->m_pkthdr.len = len;
		/*
		 * TODO
		 * rsshash and rsstype should be saved in this mbuf.
		 */
8568 		DPRINTF(WM_DEBUG_RX,
8569 		    ("%s: RX: RSS type=%" PRIu8 ", RSS hash=%" PRIu32 "\n",
8570 			device_xname(sc->sc_dev), rsstype, rsshash));
8571 
8572 		/*
8573 		 * If VLANs are enabled, VLAN packets have been unwrapped
8574 		 * for us.  Associate the tag with the packet.
8575 		 */
8576 		if (!wm_rxdesc_input_vlantag(rxq, status, vlantag, m))
8577 			continue;
8578 
8579 		/* Set up checksum info for this packet. */
8580 		wm_rxdesc_ensure_checksum(rxq, status, errors, m);
		/*
		 * Update the receive pointer while holding rxq_lock,
		 * keeping it consistent with the packet and byte
		 * counters incremented below.
		 */
8585 		rxq->rxq_ptr = i;
8586 		rxq->rxq_packets++;
8587 		rxq->rxq_bytes += len;
8588 		mutex_exit(rxq->rxq_lock);
8589 
8590 		/* Pass it on. */
8591 		if_percpuq_enqueue(sc->sc_ipq, m);
8592 
8593 		mutex_enter(rxq->rxq_lock);
8594 
8595 		if (rxq->rxq_stopping)
8596 			break;
8597 	}
8598 
8599 	if (count != 0)
8600 		rnd_add_uint32(&sc->rnd_source, count);
8601 
8602 	DPRINTF(WM_DEBUG_RX,
8603 	    ("%s: RX: rxptr -> %d\n", device_xname(sc->sc_dev), i));
8604 
8605 	return more;
8606 }
8607 
8608 /*
8609  * wm_linkintr_gmii:
8610  *
8611  *	Helper; handle link interrupts for GMII.
8612  */
8613 static void
8614 wm_linkintr_gmii(struct wm_softc *sc, uint32_t icr)
8615 {
8616 
8617 	KASSERT(WM_CORE_LOCKED(sc));
8618 
8619 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8620 		__func__));
8621 
8622 	if (icr & ICR_LSC) {
8623 		uint32_t reg;
8624 		uint32_t status = CSR_READ(sc, WMREG_STATUS);
8625 
8626 		if ((status & STATUS_LU) != 0) {
8627 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8628 				device_xname(sc->sc_dev),
8629 				(status & STATUS_FD) ? "FDX" : "HDX"));
8630 		} else {
8631 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8632 				device_xname(sc->sc_dev)));
8633 		}
8634 		if ((sc->sc_type == WM_T_ICH8) && ((status & STATUS_LU) == 0))
8635 			wm_gig_downshift_workaround_ich8lan(sc);
8636 
8637 		if ((sc->sc_type == WM_T_ICH8)
8638 		    && (sc->sc_phytype == WMPHY_IGP_3)) {
8639 			wm_kmrn_lock_loss_workaround_ich8lan(sc);
8640 		}
8641 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> mii_pollstat\n",
8642 			device_xname(sc->sc_dev)));
8643 		mii_pollstat(&sc->sc_mii);
8644 		if (sc->sc_type == WM_T_82543) {
8645 			int miistatus, active;
8646 
8647 			/*
8648 			 * With 82543, we need to force speed and
8649 			 * duplex on the MAC equal to what the PHY
8650 			 * speed and duplex configuration is.
8651 			 */
8652 			miistatus = sc->sc_mii.mii_media_status;
8653 
8654 			if (miistatus & IFM_ACTIVE) {
8655 				active = sc->sc_mii.mii_media_active;
8656 				sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
8657 				switch (IFM_SUBTYPE(active)) {
8658 				case IFM_10_T:
8659 					sc->sc_ctrl |= CTRL_SPEED_10;
8660 					break;
8661 				case IFM_100_TX:
8662 					sc->sc_ctrl |= CTRL_SPEED_100;
8663 					break;
8664 				case IFM_1000_T:
8665 					sc->sc_ctrl |= CTRL_SPEED_1000;
8666 					break;
8667 				default:
8668 					/*
8669 					 * fiber?
8670 					 * Should not enter here.
8671 					 */
8672 					printf("unknown media (%x)\n", active);
8673 					break;
8674 				}
8675 				if (active & IFM_FDX)
8676 					sc->sc_ctrl |= CTRL_FD;
8677 				CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
8678 			}
8679 		} else if (sc->sc_type == WM_T_PCH) {
8680 			wm_k1_gig_workaround_hv(sc,
8681 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
8682 		}
8683 
8684 		if ((sc->sc_phytype == WMPHY_82578)
8685 		    && (IFM_SUBTYPE(sc->sc_mii.mii_media_active)
8686 			== IFM_1000_T)) {
8687 
8688 			if ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0) {
8689 				delay(200*1000); /* XXX too big */
8690 
8691 				/* Link stall fix for link up */
8692 				wm_gmii_hv_writereg(sc->sc_dev, 1,
8693 				    HV_MUX_DATA_CTRL,
8694 				    HV_MUX_DATA_CTRL_GEN_TO_MAC
8695 				    | HV_MUX_DATA_CTRL_FORCE_SPEED);
8696 				wm_gmii_hv_writereg(sc->sc_dev, 1,
8697 				    HV_MUX_DATA_CTRL,
8698 				    HV_MUX_DATA_CTRL_GEN_TO_MAC);
8699 			}
8700 		}
8701 		/*
8702 		 * I217 Packet Loss issue:
8703 		 * ensure that FEXTNVM4 Beacon Duration is set correctly
8704 		 * on power up.
8705 		 * Set the Beacon Duration for I217 to 8 usec
8706 		 */
8707 		if (sc->sc_type >= WM_T_PCH_LPT) {
8708 			reg = CSR_READ(sc, WMREG_FEXTNVM4);
8709 			reg &= ~FEXTNVM4_BEACON_DURATION;
8710 			reg |= FEXTNVM4_BEACON_DURATION_8US;
8711 			CSR_WRITE(sc, WMREG_FEXTNVM4, reg);
8712 		}
8713 
8714 		/* XXX Work-around I218 hang issue */
8715 		/* e1000_k1_workaround_lpt_lp() */
8716 
8717 		if (sc->sc_type >= WM_T_PCH_LPT) {
8718 			/*
8719 			 * Set platform power management values for Latency
8720 			 * Tolerance Reporting (LTR)
8721 			 */
8722 			wm_platform_pm_pch_lpt(sc,
8723 			    ((sc->sc_mii.mii_media_status & IFM_ACTIVE) != 0));
8724 		}
8725 
8726 		/* FEXTNVM6 K1-off workaround */
8727 		if (sc->sc_type == WM_T_PCH_SPT) {
8728 			reg = CSR_READ(sc, WMREG_FEXTNVM6);
8729 			if (CSR_READ(sc, WMREG_PCIEANACFG)
8730 			    & FEXTNVM6_K1_OFF_ENABLE)
8731 				reg |= FEXTNVM6_K1_OFF_ENABLE;
8732 			else
8733 				reg &= ~FEXTNVM6_K1_OFF_ENABLE;
8734 			CSR_WRITE(sc, WMREG_FEXTNVM6, reg);
8735 		}
8736 	} else if (icr & ICR_RXSEQ) {
8737 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
8738 			device_xname(sc->sc_dev)));
8739 	}
8740 }
8741 
8742 /*
8743  * wm_linkintr_tbi:
8744  *
8745  *	Helper; handle link interrupts for TBI mode.
8746  */
8747 static void
8748 wm_linkintr_tbi(struct wm_softc *sc, uint32_t icr)
8749 {
8750 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8751 	uint32_t status;
8752 
8753 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8754 		__func__));
8755 
8756 	status = CSR_READ(sc, WMREG_STATUS);
8757 	if (icr & ICR_LSC) {
8758 		if (status & STATUS_LU) {
8759 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up %s\n",
8760 				device_xname(sc->sc_dev),
8761 				(status & STATUS_FD) ? "FDX" : "HDX"));
8762 			/*
8763 			 * NOTE: CTRL will update TFCE and RFCE automatically,
8764 			 * so we should update sc->sc_ctrl
8765 			 */
8766 
8767 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
8768 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
8769 			sc->sc_fcrtl &= ~FCRTL_XONE;
8770 			if (status & STATUS_FD)
8771 				sc->sc_tctl |=
8772 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
8773 			else
8774 				sc->sc_tctl |=
8775 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
8776 			if (sc->sc_ctrl & CTRL_TFCE)
8777 				sc->sc_fcrtl |= FCRTL_XONE;
8778 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
8779 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
8780 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
8781 			sc->sc_tbi_linkup = 1;
8782 			if_link_state_change(ifp, LINK_STATE_UP);
8783 		} else {
8784 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8785 				device_xname(sc->sc_dev)));
8786 			sc->sc_tbi_linkup = 0;
8787 			if_link_state_change(ifp, LINK_STATE_DOWN);
8788 		}
8789 		/* Update LED */
8790 		wm_tbi_serdes_set_linkled(sc);
8791 	} else if (icr & ICR_RXSEQ) {
8792 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
8793 			device_xname(sc->sc_dev)));
8794 	}
8795 }
8796 
8797 /*
8798  * wm_linkintr_serdes:
8799  *
8800  *	Helper; handle link interrupts for SERDES mode.
8801  */
8802 static void
8803 wm_linkintr_serdes(struct wm_softc *sc, uint32_t icr)
8804 {
8805 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
8806 	struct mii_data *mii = &sc->sc_mii;
8807 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
8808 	uint32_t pcs_adv, pcs_lpab, reg;
8809 
8810 	DPRINTF(WM_DEBUG_LINK, ("%s: %s:\n", device_xname(sc->sc_dev),
8811 		__func__));
8812 
8813 	if (icr & ICR_LSC) {
8814 		/* Check PCS */
8815 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
8816 		if ((reg & PCS_LSTS_LINKOK) != 0) {
8817 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
8818 				device_xname(sc->sc_dev)));
8819 			mii->mii_media_status |= IFM_ACTIVE;
8820 			sc->sc_tbi_linkup = 1;
8821 			if_link_state_change(ifp, LINK_STATE_UP);
8822 		} else {
8823 			DPRINTF(WM_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
8824 				device_xname(sc->sc_dev)));
8825 			mii->mii_media_status |= IFM_NONE;
8826 			sc->sc_tbi_linkup = 0;
8827 			if_link_state_change(ifp, LINK_STATE_DOWN);
8828 			wm_tbi_serdes_set_linkled(sc);
8829 			return;
8830 		}
8831 		mii->mii_media_active |= IFM_1000_SX;
8832 		if ((reg & PCS_LSTS_FDX) != 0)
8833 			mii->mii_media_active |= IFM_FDX;
8834 		else
8835 			mii->mii_media_active |= IFM_HDX;
8836 		if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
8837 			/* Check flow */
8838 			reg = CSR_READ(sc, WMREG_PCS_LSTS);
8839 			if ((reg & PCS_LSTS_AN_COMP) == 0) {
8840 				DPRINTF(WM_DEBUG_LINK,
8841 				    ("XXX LINKOK but not ACOMP\n"));
8842 				return;
8843 			}
8844 			pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
8845 			pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
8846 			DPRINTF(WM_DEBUG_LINK,
8847 			    ("XXX AN result %08x, %08x\n", pcs_adv, pcs_lpab));
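			/*
			 * Resolve flow control from the advertised and link
			 * partner ability words roughly per IEEE 802.3
			 * Annex 28B: symmetric PAUSE on both sides enables
			 * flow control in both directions; otherwise the
			 * PAUSE/ASM_DIR combinations below select Tx-only
			 * or Rx-only pause.
			 */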
8848 			if ((pcs_adv & TXCW_SYM_PAUSE)
8849 			    && (pcs_lpab & TXCW_SYM_PAUSE)) {
8850 				mii->mii_media_active |= IFM_FLOW
8851 				    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
8852 			} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
8853 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8854 			    && (pcs_lpab & TXCW_SYM_PAUSE)
8855 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8856 				mii->mii_media_active |= IFM_FLOW
8857 				    | IFM_ETH_TXPAUSE;
8858 			else if ((pcs_adv & TXCW_SYM_PAUSE)
8859 			    && (pcs_adv & TXCW_ASYM_PAUSE)
8860 			    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
8861 			    && (pcs_lpab & TXCW_ASYM_PAUSE))
8862 				mii->mii_media_active |= IFM_FLOW
8863 				    | IFM_ETH_RXPAUSE;
8864 		}
8865 		/* Update LED */
8866 		wm_tbi_serdes_set_linkled(sc);
8867 	} else {
8868 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: Receive sequence error\n",
8869 		    device_xname(sc->sc_dev)));
8870 	}
8871 }
8872 
8873 /*
8874  * wm_linkintr:
8875  *
8876  *	Helper; handle link interrupts.
8877  */
8878 static void
8879 wm_linkintr(struct wm_softc *sc, uint32_t icr)
8880 {
8881 
8882 	KASSERT(WM_CORE_LOCKED(sc));
8883 
8884 	if (sc->sc_flags & WM_F_HAS_MII)
8885 		wm_linkintr_gmii(sc, icr);
8886 	else if ((sc->sc_mediatype == WM_MEDIATYPE_SERDES)
8887 	    && (sc->sc_type >= WM_T_82575))
8888 		wm_linkintr_serdes(sc, icr);
8889 	else
8890 		wm_linkintr_tbi(sc, icr);
8891 }
8892 
8893 /*
8894  * wm_intr_legacy:
8895  *
8896  *	Interrupt service routine for INTx and MSI.
8897  */
8898 static int
8899 wm_intr_legacy(void *arg)
8900 {
8901 	struct wm_softc *sc = arg;
8902 	struct wm_queue *wmq = &sc->sc_queue[0];
8903 	struct wm_txqueue *txq = &wmq->wmq_txq;
8904 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
8905 	uint32_t icr, rndval = 0;
8906 	int handled = 0;
8907 
8908 	while (1 /* CONSTCOND */) {
8909 		icr = CSR_READ(sc, WMREG_ICR);
8910 		if ((icr & sc->sc_icr) == 0)
8911 			break;
8912 		if (handled == 0) {
8913 			DPRINTF(WM_DEBUG_TX,
8914 			    ("%s: INTx: got intr\n",device_xname(sc->sc_dev)));
8915 		}
8916 		if (rndval == 0)
8917 			rndval = icr;
8918 
8919 		mutex_enter(rxq->rxq_lock);
8920 
8921 		if (rxq->rxq_stopping) {
8922 			mutex_exit(rxq->rxq_lock);
8923 			break;
8924 		}
8925 
8926 		handled = 1;
8927 
8928 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8929 		if (icr & (ICR_RXDMT0 | ICR_RXT0)) {
8930 			DPRINTF(WM_DEBUG_RX,
8931 			    ("%s: RX: got Rx intr 0x%08x\n",
8932 				device_xname(sc->sc_dev),
8933 				icr & (ICR_RXDMT0 | ICR_RXT0)));
8934 			WM_Q_EVCNT_INCR(rxq, rxintr);
8935 		}
8936 #endif
8937 		/*
8938 		 * wm_rxeof() does *not* call upper layer functions directly,
8939 		 * as if_percpuq_enqueue() just calls softint_schedule().
8940 		 * So we can call wm_rxeof() in interrupt context.
8941 		 */
8942 		wm_rxeof(rxq, UINT_MAX);
8943 
8944 		mutex_exit(rxq->rxq_lock);
8945 		mutex_enter(txq->txq_lock);
8946 
8947 		if (txq->txq_stopping) {
8948 			mutex_exit(txq->txq_lock);
8949 			break;
8950 		}
8951 
8952 #if defined(WM_DEBUG) || defined(WM_EVENT_COUNTERS)
8953 		if (icr & ICR_TXDW) {
8954 			DPRINTF(WM_DEBUG_TX,
8955 			    ("%s: TX: got TXDW interrupt\n",
8956 				device_xname(sc->sc_dev)));
8957 			WM_Q_EVCNT_INCR(txq, txdw);
8958 		}
8959 #endif
8960 		wm_txeof(txq, UINT_MAX);
8961 
8962 		mutex_exit(txq->txq_lock);
8963 		WM_CORE_LOCK(sc);
8964 
8965 		if (sc->sc_core_stopping) {
8966 			WM_CORE_UNLOCK(sc);
8967 			break;
8968 		}
8969 
8970 		if (icr & (ICR_LSC | ICR_RXSEQ)) {
8971 			WM_EVCNT_INCR(&sc->sc_ev_linkintr);
8972 			wm_linkintr(sc, icr);
8973 		}
8974 
8975 		WM_CORE_UNLOCK(sc);
8976 
8977 		if (icr & ICR_RXO) {
8978 #if defined(WM_DEBUG)
8979 			log(LOG_WARNING, "%s: Receive overrun\n",
8980 			    device_xname(sc->sc_dev));
8981 #endif /* defined(WM_DEBUG) */
8982 		}
8983 	}
8984 
8985 	rnd_add_uint32(&sc->rnd_source, rndval);
8986 
8987 	if (handled) {
8988 		/* Try to get more packets going. */
8989 		softint_schedule(wmq->wmq_si);
8990 	}
8991 
8992 	return handled;
8993 }
8994 
8995 static inline void
8996 wm_txrxintr_disable(struct wm_queue *wmq)
8997 {
8998 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
8999 
9000 	if (sc->sc_type == WM_T_82574)
9001 		CSR_WRITE(sc, WMREG_IMC,
9002 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id));
9003 	else if (sc->sc_type == WM_T_82575)
9004 		CSR_WRITE(sc, WMREG_EIMC,
9005 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9006 	else
9007 		CSR_WRITE(sc, WMREG_EIMC, 1 << wmq->wmq_intr_idx);
9008 }
9009 
9010 static inline void
9011 wm_txrxintr_enable(struct wm_queue *wmq)
9012 {
9013 	struct wm_softc *sc = wmq->wmq_txq.txq_sc;
9014 
9015 	wm_itrs_calculate(sc, wmq);
9016 
9017 	/*
9018 	 * ICR_OTHER, which is disabled in wm_linkintr_msix(), is re-enabled
9019 	 * here. It does not matter whether RXQ(0) or RXQ(1) re-enables
9020 	 * ICR_OTHER first, because each RXQ/TXQ interrupt is disabled
9021 	 * while its wm_handle_queue(wmq) is running.
9022 	 */
9023 	if (sc->sc_type == WM_T_82574)
9024 		CSR_WRITE(sc, WMREG_IMS,
9025 		    ICR_TXQ(wmq->wmq_id) | ICR_RXQ(wmq->wmq_id) | ICR_OTHER);
9026 	else if (sc->sc_type == WM_T_82575)
9027 		CSR_WRITE(sc, WMREG_EIMS,
9028 		    EITR_TX_QUEUE(wmq->wmq_id) | EITR_RX_QUEUE(wmq->wmq_id));
9029 	else
9030 		CSR_WRITE(sc, WMREG_EIMS, 1 << wmq->wmq_intr_idx);
9031 }
9032 
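/*
 * wm_txrxintr_msix:
 *
 *	Interrupt service routine for a Tx/Rx queue pair for MSI-X.
 */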
9033 static int
9034 wm_txrxintr_msix(void *arg)
9035 {
9036 	struct wm_queue *wmq = arg;
9037 	struct wm_txqueue *txq = &wmq->wmq_txq;
9038 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9039 	struct wm_softc *sc = txq->txq_sc;
9040 	u_int txlimit = sc->sc_tx_intr_process_limit;
9041 	u_int rxlimit = sc->sc_rx_intr_process_limit;
9042 	bool txmore;
9043 	bool rxmore;
9044 
9045 	KASSERT(wmq->wmq_intr_idx == wmq->wmq_id);
9046 
9047 	DPRINTF(WM_DEBUG_TX,
9048 	    ("%s: TX: got Tx intr\n", device_xname(sc->sc_dev)));
9049 
9050 	wm_txrxintr_disable(wmq);
9051 
9052 	mutex_enter(txq->txq_lock);
9053 
9054 	if (txq->txq_stopping) {
9055 		mutex_exit(txq->txq_lock);
9056 		return 0;
9057 	}
9058 
9059 	WM_Q_EVCNT_INCR(txq, txdw);
9060 	txmore = wm_txeof(txq, txlimit);
9061 	/* wm_deferred start() is done in wm_handle_queue(). */
9062 	mutex_exit(txq->txq_lock);
9063 
9064 	DPRINTF(WM_DEBUG_RX,
9065 	    ("%s: RX: got Rx intr\n", device_xname(sc->sc_dev)));
9066 	mutex_enter(rxq->rxq_lock);
9067 
9068 	if (rxq->rxq_stopping) {
9069 		mutex_exit(rxq->rxq_lock);
9070 		return 0;
9071 	}
9072 
9073 	WM_Q_EVCNT_INCR(rxq, rxintr);
9074 	rxmore = wm_rxeof(rxq, rxlimit);
9075 	mutex_exit(rxq->rxq_lock);
9076 
9077 	wm_itrs_writereg(sc, wmq);
9078 
9079 	if (txmore || rxmore)
9080 		softint_schedule(wmq->wmq_si);
9081 	else
9082 		wm_txrxintr_enable(wmq);
9083 
9084 	return 1;
9085 }
9086 
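/*
 * wm_handle_queue:
 *
 *	Softint handler; process deferred Tx/Rx work for a queue pair.
 */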
9087 static void
9088 wm_handle_queue(void *arg)
9089 {
9090 	struct wm_queue *wmq = arg;
9091 	struct wm_txqueue *txq = &wmq->wmq_txq;
9092 	struct wm_rxqueue *rxq = &wmq->wmq_rxq;
9093 	struct wm_softc *sc = txq->txq_sc;
9094 	u_int txlimit = sc->sc_tx_process_limit;
9095 	u_int rxlimit = sc->sc_rx_process_limit;
9096 	bool txmore;
9097 	bool rxmore;
9098 
9099 	mutex_enter(txq->txq_lock);
9100 	if (txq->txq_stopping) {
9101 		mutex_exit(txq->txq_lock);
9102 		return;
9103 	}
9104 	txmore = wm_txeof(txq, txlimit);
9105 	wm_deferred_start_locked(txq);
9106 	mutex_exit(txq->txq_lock);
9107 
9108 	mutex_enter(rxq->rxq_lock);
9109 	if (rxq->rxq_stopping) {
9110 		mutex_exit(rxq->rxq_lock);
9111 		return;
9112 	}
9113 	WM_Q_EVCNT_INCR(rxq, rxdefer);
9114 	rxmore = wm_rxeof(rxq, rxlimit);
9115 	mutex_exit(rxq->rxq_lock);
9116 
9117 	if (txmore || rxmore)
9118 		softint_schedule(wmq->wmq_si);
9119 	else
9120 		wm_txrxintr_enable(wmq);
9121 }
9122 
9123 /*
9124  * wm_linkintr_msix:
9125  *
9126  *	Interrupt service routine for link status change for MSI-X.
9127  */
9128 static int
9129 wm_linkintr_msix(void *arg)
9130 {
9131 	struct wm_softc *sc = arg;
9132 	uint32_t reg;
9133 	bool has_rxo;
9134 
9135 	DPRINTF(WM_DEBUG_LINK,
9136 	    ("%s: LINK: got link intr\n", device_xname(sc->sc_dev)));
9137 
9138 	reg = CSR_READ(sc, WMREG_ICR);
9139 	WM_CORE_LOCK(sc);
9140 	if (sc->sc_core_stopping)
9141 		goto out;
9142 
9143 	if ((reg & ICR_LSC) != 0) {
9144 		WM_EVCNT_INCR(&sc->sc_ev_linkintr);
9145 		wm_linkintr(sc, ICR_LSC);
9146 	}
9147 
9148 	/*
9149 	 * XXX 82574 MSI-X mode workaround
9150 	 *
9151 	 * 82574 MSI-X mode raises receive overrun (RXO) interrupts on the
9152 	 * ICR_OTHER MSI-X vector and raises neither the ICR_RXQ(0) nor the
9153 	 * ICR_RXQ(1) vector. So we generate ICR_RXQ(0) and ICR_RXQ(1)
9154 	 * interrupts by writing WMREG_ICS to process receive packets.
9155 	 */
9156 	if (sc->sc_type == WM_T_82574 && ((reg & ICR_RXO) != 0)) {
9157 #if defined(WM_DEBUG)
9158 		log(LOG_WARNING, "%s: Receive overrun\n",
9159 		    device_xname(sc->sc_dev));
9160 #endif /* defined(WM_DEBUG) */
9161 
9162 		has_rxo = true;
9163 		/*
9164 		 * The RXO interrupt fires at a very high rate when receive
9165 		 * traffic is heavy, so we use polling mode for ICR_OTHER as
9166 		 * we do for the Tx/Rx interrupts. ICR_OTHER will be
9167 		 * re-enabled at the end of wm_txrxintr_msix(), which is
9168 		 * kicked by both the ICR_RXQ(0) and ICR_RXQ(1) interrupts.
9169 		 */
9170 		CSR_WRITE(sc, WMREG_IMC, ICR_OTHER);
9171 
9172 		CSR_WRITE(sc, WMREG_ICS, ICR_RXQ(0) | ICR_RXQ(1));
9173 	}
9174 
9175 
9176 
9177 out:
9178 	WM_CORE_UNLOCK(sc);
9179 
9180 	if (sc->sc_type == WM_T_82574) {
9181 		if (!has_rxo)
9182 			CSR_WRITE(sc, WMREG_IMS, ICR_OTHER | ICR_LSC);
9183 		else
9184 			CSR_WRITE(sc, WMREG_IMS, ICR_LSC);
9185 	} else if (sc->sc_type == WM_T_82575)
9186 		CSR_WRITE(sc, WMREG_EIMS, EITR_OTHER);
9187 	else
9188 		CSR_WRITE(sc, WMREG_EIMS, 1 << sc->sc_link_intr_idx);
9189 
9190 	return 1;
9191 }
9192 
9193 /*
9194  * Media related.
9195  * GMII, SGMII, TBI (and SERDES)
9196  */
9197 
9198 /* Common */
9199 
9200 /*
9201  * wm_tbi_serdes_set_linkled:
9202  *
9203  *	Update the link LED on TBI and SERDES devices.
9204  */
9205 static void
9206 wm_tbi_serdes_set_linkled(struct wm_softc *sc)
9207 {
9208 
9209 	if (sc->sc_tbi_linkup)
9210 		sc->sc_ctrl |= CTRL_SWDPIN(0);
9211 	else
9212 		sc->sc_ctrl &= ~CTRL_SWDPIN(0);
9213 
9214 	/* 82540 or newer devices are active low */
9215 	sc->sc_ctrl ^= (sc->sc_type >= WM_T_82540) ? CTRL_SWDPIN(0) : 0;
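	/* i.e. on 82540 and newer the pin is driven low while link is up */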
9216 
9217 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9218 }
9219 
9220 /* GMII related */
9221 
9222 /*
9223  * wm_gmii_reset:
9224  *
9225  *	Reset the PHY.
9226  */
9227 static void
9228 wm_gmii_reset(struct wm_softc *sc)
9229 {
9230 	uint32_t reg;
9231 	int rv;
9232 
9233 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9234 		device_xname(sc->sc_dev), __func__));
9235 
9236 	rv = sc->phy.acquire(sc);
9237 	if (rv != 0) {
9238 		aprint_error_dev(sc->sc_dev, "%s: failed to get semaphore\n",
9239 		    __func__);
9240 		return;
9241 	}
9242 
9243 	switch (sc->sc_type) {
9244 	case WM_T_82542_2_0:
9245 	case WM_T_82542_2_1:
9246 		/* null */
9247 		break;
9248 	case WM_T_82543:
9249 		/*
9250 		 * With 82543, we need to force speed and duplex on the MAC
9251 		 * equal to what the PHY speed and duplex configuration is.
9252 		 * In addition, we need to perform a hardware reset on the PHY
9253 		 * to take it out of reset.
9254 		 */
9255 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9256 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9257 
9258 		/* The PHY reset pin is active-low. */
9259 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
9260 		reg &= ~((CTRL_EXT_SWDPIO_MASK << CTRL_EXT_SWDPIO_SHIFT) |
9261 		    CTRL_EXT_SWDPIN(4));
9262 		reg |= CTRL_EXT_SWDPIO(4);
9263 
9264 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
9265 		CSR_WRITE_FLUSH(sc);
9266 		delay(10*1000);
9267 
9268 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_SWDPIN(4));
9269 		CSR_WRITE_FLUSH(sc);
9270 		delay(150);
9271 #if 0
9272 		sc->sc_ctrl_ext = reg | CTRL_EXT_SWDPIN(4);
9273 #endif
9274 		delay(20*1000);	/* XXX extra delay to get PHY ID? */
9275 		break;
9276 	case WM_T_82544:	/* reset 10000us */
9277 	case WM_T_82540:
9278 	case WM_T_82545:
9279 	case WM_T_82545_3:
9280 	case WM_T_82546:
9281 	case WM_T_82546_3:
9282 	case WM_T_82541:
9283 	case WM_T_82541_2:
9284 	case WM_T_82547:
9285 	case WM_T_82547_2:
9286 	case WM_T_82571:	/* reset 100us */
9287 	case WM_T_82572:
9288 	case WM_T_82573:
9289 	case WM_T_82574:
9290 	case WM_T_82575:
9291 	case WM_T_82576:
9292 	case WM_T_82580:
9293 	case WM_T_I350:
9294 	case WM_T_I354:
9295 	case WM_T_I210:
9296 	case WM_T_I211:
9297 	case WM_T_82583:
9298 	case WM_T_80003:
9299 		/* generic reset */
9300 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9301 		CSR_WRITE_FLUSH(sc);
9302 		delay(20000);
9303 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9304 		CSR_WRITE_FLUSH(sc);
9305 		delay(20000);
9306 
9307 		if ((sc->sc_type == WM_T_82541)
9308 		    || (sc->sc_type == WM_T_82541_2)
9309 		    || (sc->sc_type == WM_T_82547)
9310 		    || (sc->sc_type == WM_T_82547_2)) {
9311 			/* Workarounds for IGP are done in igp_reset() */
9312 			/* XXX add code to set LED after phy reset */
9313 		}
9314 		break;
9315 	case WM_T_ICH8:
9316 	case WM_T_ICH9:
9317 	case WM_T_ICH10:
9318 	case WM_T_PCH:
9319 	case WM_T_PCH2:
9320 	case WM_T_PCH_LPT:
9321 	case WM_T_PCH_SPT:
9322 	case WM_T_PCH_CNP:
9323 		/* generic reset */
9324 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
9325 		CSR_WRITE_FLUSH(sc);
9326 		delay(100);
9327 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9328 		CSR_WRITE_FLUSH(sc);
9329 		delay(150);
9330 		break;
9331 	default:
9332 		panic("%s: %s: unknown type\n", device_xname(sc->sc_dev),
9333 		    __func__);
9334 		break;
9335 	}
9336 
9337 	sc->phy.release(sc);
9338 
9339 	/* get_cfg_done */
9340 	wm_get_cfg_done(sc);
9341 
9342 	/* extra setup */
9343 	switch (sc->sc_type) {
9344 	case WM_T_82542_2_0:
9345 	case WM_T_82542_2_1:
9346 	case WM_T_82543:
9347 	case WM_T_82544:
9348 	case WM_T_82540:
9349 	case WM_T_82545:
9350 	case WM_T_82545_3:
9351 	case WM_T_82546:
9352 	case WM_T_82546_3:
9353 	case WM_T_82541_2:
9354 	case WM_T_82547_2:
9355 	case WM_T_82571:
9356 	case WM_T_82572:
9357 	case WM_T_82573:
9358 	case WM_T_82574:
9359 	case WM_T_82583:
9360 	case WM_T_82575:
9361 	case WM_T_82576:
9362 	case WM_T_82580:
9363 	case WM_T_I350:
9364 	case WM_T_I354:
9365 	case WM_T_I210:
9366 	case WM_T_I211:
9367 	case WM_T_80003:
9368 		/* null */
9369 		break;
9370 	case WM_T_82541:
9371 	case WM_T_82547:
9372 		/* XXX Actively configure the LED after PHY reset */
9373 		break;
9374 	case WM_T_ICH8:
9375 	case WM_T_ICH9:
9376 	case WM_T_ICH10:
9377 	case WM_T_PCH:
9378 	case WM_T_PCH2:
9379 	case WM_T_PCH_LPT:
9380 	case WM_T_PCH_SPT:
9381 	case WM_T_PCH_CNP:
9382 		wm_phy_post_reset(sc);
9383 		break;
9384 	default:
9385 		panic("%s: unknown type\n", __func__);
9386 		break;
9387 	}
9388 }
9389 
9390 /*
9391  * Setup sc_phytype and mii_{read|write}reg.
9392  *
9393  *  To identify the PHY type, the correct read/write function must be
9394  * selected, and to select the correct read/write function we need the
9395  * PCI ID or the MAC type, without accessing any PHY registers.
9396  *
9397  *  On the first call of this function, the PHY ID is not known yet, so
9398  * check the PCI ID or the MAC type. The list of PCI IDs may not be
9399  * perfect, so the result might be incorrect.
9400  *
9401  *  On the second call, the PHY OUI and model are used to identify the
9402  * PHY type. That might still not be perfect because some entries are
9403  * missing from the comparison, but it is better than the first call.
9404  *
9405  *  If the newly detected result differs from the previous assumption,
9406  * a diagnostic message is printed.
9407  */
9408 static void
9409 wm_gmii_setup_phytype(struct wm_softc *sc, uint32_t phy_oui,
9410     uint16_t phy_model)
9411 {
9412 	device_t dev = sc->sc_dev;
9413 	struct mii_data *mii = &sc->sc_mii;
9414 	uint16_t new_phytype = WMPHY_UNKNOWN;
9415 	uint16_t doubt_phytype = WMPHY_UNKNOWN;
9416 	mii_readreg_t new_readreg;
9417 	mii_writereg_t new_writereg;
9418 
9419 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
9420 		device_xname(sc->sc_dev), __func__));
9421 
9422 	if (mii->mii_readreg == NULL) {
9423 		/*
9424 		 *  This is the first call of this function. For ICH and PCH
9425 		 * variants, it's difficult to determine the PHY access method
9426 		 * by sc_type, so use the PCI product ID for some devices.
9427 		 */
9428 
9429 		switch (sc->sc_pcidevid) {
9430 		case PCI_PRODUCT_INTEL_PCH_M_LM:
9431 		case PCI_PRODUCT_INTEL_PCH_M_LC:
9432 			/* 82577 */
9433 			new_phytype = WMPHY_82577;
9434 			break;
9435 		case PCI_PRODUCT_INTEL_PCH_D_DM:
9436 		case PCI_PRODUCT_INTEL_PCH_D_DC:
9437 			/* 82578 */
9438 			new_phytype = WMPHY_82578;
9439 			break;
9440 		case PCI_PRODUCT_INTEL_PCH2_LV_LM:
9441 		case PCI_PRODUCT_INTEL_PCH2_LV_V:
9442 			/* 82579 */
9443 			new_phytype = WMPHY_82579;
9444 			break;
9445 		case PCI_PRODUCT_INTEL_82801H_82567V_3:
9446 		case PCI_PRODUCT_INTEL_82801I_BM:
9447 		case PCI_PRODUCT_INTEL_82801I_IGP_M_AMT: /* Not IGP but BM */
9448 		case PCI_PRODUCT_INTEL_82801J_R_BM_LM:
9449 		case PCI_PRODUCT_INTEL_82801J_R_BM_LF:
9450 		case PCI_PRODUCT_INTEL_82801J_D_BM_LM:
9451 		case PCI_PRODUCT_INTEL_82801J_D_BM_LF:
9452 		case PCI_PRODUCT_INTEL_82801J_R_BM_V:
9453 			/* ICH8, 9, 10 with 82567 */
9454 			new_phytype = WMPHY_BM;
9455 			break;
9456 		default:
9457 			break;
9458 		}
9459 	} else {
9460 		/* It's not the first call. Use PHY OUI and model */
9461 		switch (phy_oui) {
9462 		case MII_OUI_ATHEROS: /* XXX ??? */
9463 			switch (phy_model) {
9464 			case 0x0004: /* XXX */
9465 				new_phytype = WMPHY_82578;
9466 				break;
9467 			default:
9468 				break;
9469 			}
9470 			break;
9471 		case MII_OUI_xxMARVELL:
9472 			switch (phy_model) {
9473 			case MII_MODEL_xxMARVELL_I210:
9474 				new_phytype = WMPHY_I210;
9475 				break;
9476 			case MII_MODEL_xxMARVELL_E1011:
9477 			case MII_MODEL_xxMARVELL_E1000_3:
9478 			case MII_MODEL_xxMARVELL_E1000_5:
9479 			case MII_MODEL_xxMARVELL_E1112:
9480 				new_phytype = WMPHY_M88;
9481 				break;
9482 			case MII_MODEL_xxMARVELL_E1149:
9483 				new_phytype = WMPHY_BM;
9484 				break;
9485 			case MII_MODEL_xxMARVELL_E1111:
9486 			case MII_MODEL_xxMARVELL_I347:
9487 			case MII_MODEL_xxMARVELL_E1512:
9488 			case MII_MODEL_xxMARVELL_E1340M:
9489 			case MII_MODEL_xxMARVELL_E1543:
9490 				new_phytype = WMPHY_M88;
9491 				break;
9492 			case MII_MODEL_xxMARVELL_I82563:
9493 				new_phytype = WMPHY_GG82563;
9494 				break;
9495 			default:
9496 				break;
9497 			}
9498 			break;
9499 		case MII_OUI_INTEL:
9500 			switch (phy_model) {
9501 			case MII_MODEL_INTEL_I82577:
9502 				new_phytype = WMPHY_82577;
9503 				break;
9504 			case MII_MODEL_INTEL_I82579:
9505 				new_phytype = WMPHY_82579;
9506 				break;
9507 			case MII_MODEL_INTEL_I217:
9508 				new_phytype = WMPHY_I217;
9509 				break;
9510 			case MII_MODEL_INTEL_I82580:
9511 			case MII_MODEL_INTEL_I350:
9512 				new_phytype = WMPHY_82580;
9513 				break;
9514 			default:
9515 				break;
9516 			}
9517 			break;
9518 		case MII_OUI_yyINTEL:
9519 			switch (phy_model) {
9520 			case MII_MODEL_yyINTEL_I82562G:
9521 			case MII_MODEL_yyINTEL_I82562EM:
9522 			case MII_MODEL_yyINTEL_I82562ET:
9523 				new_phytype = WMPHY_IFE;
9524 				break;
9525 			case MII_MODEL_yyINTEL_IGP01E1000:
9526 				new_phytype = WMPHY_IGP;
9527 				break;
9528 			case MII_MODEL_yyINTEL_I82566:
9529 				new_phytype = WMPHY_IGP_3;
9530 				break;
9531 			default:
9532 				break;
9533 			}
9534 			break;
9535 		default:
9536 			break;
9537 		}
9538 		if (new_phytype == WMPHY_UNKNOWN)
9539 			aprint_verbose_dev(dev, "%s: unknown PHY model\n",
9540 			    __func__);
9541 
9542 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9543 		    && (sc->sc_phytype != new_phytype)) {
9544 			aprint_error_dev(dev, "Previously assumed PHY type(%u)"
9545 			    " was incorrect. PHY type from PHY ID = %u\n",
9546 			    sc->sc_phytype, new_phytype);
9547 		}
9548 	}
9549 
9550 	/* Next, use sc->sc_flags and sc->sc_type to set read/write funcs. */
9551 	if (((sc->sc_flags & WM_F_SGMII) != 0) && !wm_sgmii_uses_mdio(sc)) {
9552 		/* SGMII */
9553 		new_readreg = wm_sgmii_readreg;
9554 		new_writereg = wm_sgmii_writereg;
9555 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
9556 		/* BM2 (phyaddr == 1) */
9557 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9558 		    && (new_phytype != WMPHY_BM)
9559 		    && (new_phytype != WMPHY_UNKNOWN))
9560 			doubt_phytype = new_phytype;
9561 		new_phytype = WMPHY_BM;
9562 		new_readreg = wm_gmii_bm_readreg;
9563 		new_writereg = wm_gmii_bm_writereg;
9564 	} else if (sc->sc_type >= WM_T_PCH) {
9565 		/* All PCH* use _hv_ */
9566 		new_readreg = wm_gmii_hv_readreg;
9567 		new_writereg = wm_gmii_hv_writereg;
9568 	} else if (sc->sc_type >= WM_T_ICH8) {
9569 		/* non-82567 ICH8, 9 and 10 */
9570 		new_readreg = wm_gmii_i82544_readreg;
9571 		new_writereg = wm_gmii_i82544_writereg;
9572 	} else if (sc->sc_type >= WM_T_80003) {
9573 		/* 80003 */
9574 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9575 		    && (new_phytype != WMPHY_GG82563)
9576 		    && (new_phytype != WMPHY_UNKNOWN))
9577 			doubt_phytype = new_phytype;
9578 		new_phytype = WMPHY_GG82563;
9579 		new_readreg = wm_gmii_i80003_readreg;
9580 		new_writereg = wm_gmii_i80003_writereg;
9581 	} else if (sc->sc_type >= WM_T_I210) {
9582 		/* I210 and I211 */
9583 		if ((sc->sc_phytype != WMPHY_UNKNOWN)
9584 		    && (new_phytype != WMPHY_I210)
9585 		    && (new_phytype != WMPHY_UNKNOWN))
9586 			doubt_phytype = new_phytype;
9587 		new_phytype = WMPHY_I210;
9588 		new_readreg = wm_gmii_gs40g_readreg;
9589 		new_writereg = wm_gmii_gs40g_writereg;
9590 	} else if (sc->sc_type >= WM_T_82580) {
9591 		/* 82580, I350 and I354 */
9592 		new_readreg = wm_gmii_82580_readreg;
9593 		new_writereg = wm_gmii_82580_writereg;
9594 	} else if (sc->sc_type >= WM_T_82544) {
9595 		/* 82544, 0, [56], [17], 8257[1234] and 82583 */
9596 		new_readreg = wm_gmii_i82544_readreg;
9597 		new_writereg = wm_gmii_i82544_writereg;
9598 	} else {
9599 		new_readreg = wm_gmii_i82543_readreg;
9600 		new_writereg = wm_gmii_i82543_writereg;
9601 	}
9602 
9603 	if (new_phytype == WMPHY_BM) {
9604 		/* All BM use _bm_ */
9605 		new_readreg = wm_gmii_bm_readreg;
9606 		new_writereg = wm_gmii_bm_writereg;
9607 	}
9608 	if ((sc->sc_type >= WM_T_PCH) && (sc->sc_type <= WM_T_PCH_CNP)) {
9609 		/* All PCH* use _hv_ */
9610 		new_readreg = wm_gmii_hv_readreg;
9611 		new_writereg = wm_gmii_hv_writereg;
9612 	}
9613 
9614 	/* Diag output */
9615 	if (doubt_phytype != WMPHY_UNKNOWN)
9616 		aprint_error_dev(dev, "Assumed new PHY type was "
9617 		    "incorrect. old = %u, new = %u\n", sc->sc_phytype,
9618 		    new_phytype);
9619 	else if ((sc->sc_phytype != WMPHY_UNKNOWN)
9620 	    && (sc->sc_phytype != new_phytype))
9621 		aprint_error_dev(dev, "Previously assumed PHY type(%u)"
9622 		    " was incorrect. New PHY type = %u\n",
9623 		    sc->sc_phytype, new_phytype);
9624 
9625 	if ((mii->mii_readreg != NULL) && (new_phytype == WMPHY_UNKNOWN))
9626 		aprint_error_dev(dev, "PHY type is still unknown.\n");
9627 
9628 	if ((mii->mii_readreg != NULL) && (mii->mii_readreg != new_readreg))
9629 		aprint_error_dev(dev, "Previously assumed PHY read/write "
9630 		    "function was incorrect.\n");
9631 
9632 	/* Update now */
9633 	sc->sc_phytype = new_phytype;
9634 	mii->mii_readreg = new_readreg;
9635 	mii->mii_writereg = new_writereg;
9636 }
9637 
9638 /*
9639  * wm_get_phy_id_82575:
9640  *
9641  * Return PHY ID. Return -1 if it failed.
9642  */
9643 static int
9644 wm_get_phy_id_82575(struct wm_softc *sc)
9645 {
9646 	uint32_t reg;
9647 	int phyid = -1;
9648 
9649 	/* XXX */
9650 	if ((sc->sc_flags & WM_F_SGMII) == 0)
9651 		return -1;
9652 
9653 	if (wm_sgmii_uses_mdio(sc)) {
9654 		switch (sc->sc_type) {
9655 		case WM_T_82575:
9656 		case WM_T_82576:
9657 			reg = CSR_READ(sc, WMREG_MDIC);
9658 			phyid = (reg & MDIC_PHY_MASK) >> MDIC_PHY_SHIFT;
9659 			break;
9660 		case WM_T_82580:
9661 		case WM_T_I350:
9662 		case WM_T_I354:
9663 		case WM_T_I210:
9664 		case WM_T_I211:
9665 			reg = CSR_READ(sc, WMREG_MDICNFG);
9666 			phyid = (reg & MDICNFG_PHY_MASK) >> MDICNFG_PHY_SHIFT;
9667 			break;
9668 		default:
9669 			return -1;
9670 		}
9671 	}
9672 
9673 	return phyid;
9674 }
9675 
9676 
9677 /*
9678  * wm_gmii_mediainit:
9679  *
9680  *	Initialize media for use on 1000BASE-T devices.
9681  */
9682 static void
9683 wm_gmii_mediainit(struct wm_softc *sc, pci_product_id_t prodid)
9684 {
9685 	device_t dev = sc->sc_dev;
9686 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
9687 	struct mii_data *mii = &sc->sc_mii;
9688 	uint32_t reg;
9689 
9690 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9691 		device_xname(sc->sc_dev), __func__));
9692 
9693 	/* We have GMII. */
9694 	sc->sc_flags |= WM_F_HAS_MII;
9695 
9696 	if (sc->sc_type == WM_T_80003)
9697 		sc->sc_tipg =  TIPG_1000T_80003_DFLT;
9698 	else
9699 		sc->sc_tipg = TIPG_1000T_DFLT;
9700 
9701 	/* XXX Not for I354? FreeBSD's e1000_82575.c doesn't include it */
9702 	if ((sc->sc_type == WM_T_82580)
9703 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I210)
9704 	    || (sc->sc_type == WM_T_I211)) {
9705 		reg = CSR_READ(sc, WMREG_PHPM);
9706 		reg &= ~PHPM_GO_LINK_D;
9707 		CSR_WRITE(sc, WMREG_PHPM, reg);
9708 	}
9709 
9710 	/*
9711 	 * Let the chip set speed/duplex on its own based on
9712 	 * signals from the PHY.
9713 	 * XXXbouyer - I'm not sure this is right for the 80003,
9714 	 * the em driver only sets CTRL_SLU here - but it seems to work.
9715 	 */
9716 	sc->sc_ctrl |= CTRL_SLU;
9717 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9718 
9719 	/* Initialize our media structures and probe the GMII. */
9720 	mii->mii_ifp = ifp;
9721 
9722 	mii->mii_statchg = wm_gmii_statchg;
9723 
9724 	/* get PHY control from SMBus to PCIe */
9725 	if ((sc->sc_type == WM_T_PCH) || (sc->sc_type == WM_T_PCH2)
9726 	    || (sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
9727 	    || (sc->sc_type == WM_T_PCH_CNP))
9728 		wm_smbustopci(sc);
9729 
9730 	wm_gmii_reset(sc);
9731 
9732 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
9733 	ifmedia_init(&mii->mii_media, IFM_IMASK, wm_gmii_mediachange,
9734 	    wm_gmii_mediastatus);
9735 
9736 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576)
9737 	    || (sc->sc_type == WM_T_82580)
9738 	    || (sc->sc_type == WM_T_I350) || (sc->sc_type == WM_T_I354)
9739 	    || (sc->sc_type == WM_T_I210) || (sc->sc_type == WM_T_I211)) {
9740 		if ((sc->sc_flags & WM_F_SGMII) == 0) {
9741 			/* Attach only one port */
9742 			mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, 1,
9743 			    MII_OFFSET_ANY, MIIF_DOPAUSE);
9744 		} else {
9745 			int i, id;
9746 			uint32_t ctrl_ext;
9747 
9748 			id = wm_get_phy_id_82575(sc);
9749 			if (id != -1) {
9750 				mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff,
9751 				    id, MII_OFFSET_ANY, MIIF_DOPAUSE);
9752 			}
9753 			if ((id == -1)
9754 			    || (LIST_FIRST(&mii->mii_phys) == NULL)) {
9755 				/* Power on sgmii phy if it is disabled */
9756 				ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
9757 				CSR_WRITE(sc, WMREG_CTRL_EXT,
9758 				    ctrl_ext &~ CTRL_EXT_SWDPIN(3));
9759 				CSR_WRITE_FLUSH(sc);
9760 				delay(300*1000); /* XXX too long */
9761 
9762 				/* Try PHY addresses 1 through 7 */
9763 				for (i = 1; i < 8; i++)
9764 					mii_attach(sc->sc_dev, &sc->sc_mii,
9765 					    0xffffffff, i, MII_OFFSET_ANY,
9766 					    MIIF_DOPAUSE);
9767 
9768 				/* restore previous sfp cage power state */
9769 				CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
9770 			}
9771 		}
9772 	} else {
9773 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9774 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9775 	}
9776 
9777 	/*
9778 	 * If the MAC is PCH2 or newer and we failed to detect the MII PHY,
9779 	 * call wm_set_mdio_slow_mode_hv() as a workaround and retry.
9780 	 */
9781 	if (((sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)
9782 		|| (sc->sc_type == WM_T_PCH_SPT)
9783 		|| (sc->sc_type == WM_T_PCH_CNP))
9784 	    && (LIST_FIRST(&mii->mii_phys) == NULL)) {
9785 		wm_set_mdio_slow_mode_hv(sc);
9786 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9787 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9788 	}
9789 
9790 	/*
9791 	 * (For ICH8 variants)
9792 	 * If PHY detection failed, use BM's r/w function and retry.
9793 	 */
9794 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9795 		/* if failed, retry with *_bm_* */
9796 		aprint_verbose_dev(dev, "Assumed PHY access function "
9797 		    "(type = %d) might be incorrect. Use BM and retry.\n",
9798 		    sc->sc_phytype);
9799 		sc->sc_phytype = WMPHY_BM;
9800 		mii->mii_readreg = wm_gmii_bm_readreg;
9801 		mii->mii_writereg = wm_gmii_bm_writereg;
9802 
9803 		mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
9804 		    MII_OFFSET_ANY, MIIF_DOPAUSE);
9805 	}
9806 
9807 	if (LIST_FIRST(&mii->mii_phys) == NULL) {
9808 		/* No PHY was found */
9809 		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
9810 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
9811 		sc->sc_phytype = WMPHY_NONE;
9812 	} else {
9813 		struct mii_softc *child = LIST_FIRST(&mii->mii_phys);
9814 
9815 		/*
9816 		 * PHY found! Check the PHY type again with the second call
9817 		 * of wm_gmii_setup_phytype().
9818 		 */
9819 		wm_gmii_setup_phytype(sc, child->mii_mpd_oui,
9820 		    child->mii_mpd_model);
9821 
9822 		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
9823 	}
9824 }
9825 
9826 /*
9827  * wm_gmii_mediachange:	[ifmedia interface function]
9828  *
9829  *	Set hardware to newly-selected media on a 1000BASE-T device.
9830  */
9831 static int
9832 wm_gmii_mediachange(struct ifnet *ifp)
9833 {
9834 	struct wm_softc *sc = ifp->if_softc;
9835 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
9836 	int rc;
9837 
9838 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
9839 		device_xname(sc->sc_dev), __func__));
9840 	if ((ifp->if_flags & IFF_UP) == 0)
9841 		return 0;
9842 
9843 	/* Disable D0 LPLU. */
9844 	wm_lplu_d0_disable(sc);
9845 
9846 	sc->sc_ctrl &= ~(CTRL_SPEED_MASK | CTRL_FD);
9847 	sc->sc_ctrl |= CTRL_SLU;
9848 	if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
9849 	    || (sc->sc_type > WM_T_82543)) {
9850 		sc->sc_ctrl &= ~(CTRL_FRCSPD | CTRL_FRCFDX);
9851 	} else {
9852 		sc->sc_ctrl &= ~CTRL_ASDE;
9853 		sc->sc_ctrl |= CTRL_FRCSPD | CTRL_FRCFDX;
9854 		if (ife->ifm_media & IFM_FDX)
9855 			sc->sc_ctrl |= CTRL_FD;
9856 		switch (IFM_SUBTYPE(ife->ifm_media)) {
9857 		case IFM_10_T:
9858 			sc->sc_ctrl |= CTRL_SPEED_10;
9859 			break;
9860 		case IFM_100_TX:
9861 			sc->sc_ctrl |= CTRL_SPEED_100;
9862 			break;
9863 		case IFM_1000_T:
9864 			sc->sc_ctrl |= CTRL_SPEED_1000;
9865 			break;
9866 		default:
9867 			panic("wm_gmii_mediachange: bad media 0x%x",
9868 			    ife->ifm_media);
9869 		}
9870 	}
9871 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
9872 	CSR_WRITE_FLUSH(sc);
9873 	if (sc->sc_type <= WM_T_82543)
9874 		wm_gmii_reset(sc);
9875 
9876 	if ((rc = mii_mediachg(&sc->sc_mii)) == ENXIO)
9877 		return 0;
9878 	return rc;
9879 }
9880 
9881 /*
9882  * wm_gmii_mediastatus:	[ifmedia interface function]
9883  *
9884  *	Get the current interface media status on a 1000BASE-T device.
9885  */
9886 static void
9887 wm_gmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
9888 {
9889 	struct wm_softc *sc = ifp->if_softc;
9890 
9891 	ether_mediastatus(ifp, ifmr);
9892 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
9893 	    | sc->sc_flowflags;
9894 }
9895 
9896 #define	MDI_IO		CTRL_SWDPIN(2)
9897 #define	MDI_DIR		CTRL_SWDPIO(2)	/* host -> PHY */
9898 #define	MDI_CLK		CTRL_SWDPIN(3)
9899 
9900 static void
9901 wm_i82543_mii_sendbits(struct wm_softc *sc, uint32_t data, int nbits)
9902 {
9903 	uint32_t i, v;
9904 
9905 	v = CSR_READ(sc, WMREG_CTRL);
9906 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9907 	v |= MDI_DIR | CTRL_SWDPIO(3);
9908 
9909 	for (i = 1 << (nbits - 1); i != 0; i >>= 1) {
9910 		if (data & i)
9911 			v |= MDI_IO;
9912 		else
9913 			v &= ~MDI_IO;
9914 		CSR_WRITE(sc, WMREG_CTRL, v);
9915 		CSR_WRITE_FLUSH(sc);
9916 		delay(10);
9917 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9918 		CSR_WRITE_FLUSH(sc);
9919 		delay(10);
9920 		CSR_WRITE(sc, WMREG_CTRL, v);
9921 		CSR_WRITE_FLUSH(sc);
9922 		delay(10);
9923 	}
9924 }
9925 
9926 static uint32_t
9927 wm_i82543_mii_recvbits(struct wm_softc *sc)
9928 {
9929 	uint32_t v, i, data = 0;
9930 
9931 	v = CSR_READ(sc, WMREG_CTRL);
9932 	v &= ~(MDI_IO | MDI_CLK | (CTRL_SWDPIO_MASK << CTRL_SWDPIO_SHIFT));
9933 	v |= CTRL_SWDPIO(3);
9934 
9935 	CSR_WRITE(sc, WMREG_CTRL, v);
9936 	CSR_WRITE_FLUSH(sc);
9937 	delay(10);
9938 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9939 	CSR_WRITE_FLUSH(sc);
9940 	delay(10);
9941 	CSR_WRITE(sc, WMREG_CTRL, v);
9942 	CSR_WRITE_FLUSH(sc);
9943 	delay(10);
9944 
9945 	for (i = 0; i < 16; i++) {
9946 		data <<= 1;
9947 		CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9948 		CSR_WRITE_FLUSH(sc);
9949 		delay(10);
9950 		if (CSR_READ(sc, WMREG_CTRL) & MDI_IO)
9951 			data |= 1;
9952 		CSR_WRITE(sc, WMREG_CTRL, v);
9953 		CSR_WRITE_FLUSH(sc);
9954 		delay(10);
9955 	}
9956 
9957 	CSR_WRITE(sc, WMREG_CTRL, v | MDI_CLK);
9958 	CSR_WRITE_FLUSH(sc);
9959 	delay(10);
9960 	CSR_WRITE(sc, WMREG_CTRL, v);
9961 	CSR_WRITE_FLUSH(sc);
9962 	delay(10);
9963 
9964 	return data;
9965 }
9966 
9967 #undef MDI_IO
9968 #undef MDI_DIR
9969 #undef MDI_CLK
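
/*
 * The two bit-bang helpers above clock an IEEE 802.3 clause 22 management
 * frame on the MDIO pin. A read, as issued by wm_gmii_i82543_readreg()
 * below, looks roughly like this (a sketch, not chip documentation):
 *
 *	32 bits	preamble	all ones
 *	 2 bits	start		01
 *	 2 bits	opcode		10 = read, 01 = write
 *	 5 bits	PHY address
 *	 5 bits	register address
 *	 2 bits	turnaround	driven by the PHY on reads
 *	16 bits	data
 */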
9970 
9971 /*
9972  * wm_gmii_i82543_readreg:	[mii interface function]
9973  *
9974  *	Read a PHY register on the GMII (i82543 version).
9975  */
9976 static int
9977 wm_gmii_i82543_readreg(device_t dev, int phy, int reg)
9978 {
9979 	struct wm_softc *sc = device_private(dev);
9980 	int rv;
9981 
9982 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
9983 	wm_i82543_mii_sendbits(sc, reg | (phy << 5) |
9984 	    (MII_COMMAND_READ << 10) | (MII_COMMAND_START << 12), 14);
9985 	rv = wm_i82543_mii_recvbits(sc) & 0xffff;
9986 
9987 	DPRINTF(WM_DEBUG_GMII, ("%s: GMII: read phy %d reg %d -> 0x%04x\n",
9988 		device_xname(dev), phy, reg, rv));
9989 
9990 	return rv;
9991 }
9992 
9993 /*
9994  * wm_gmii_i82543_writereg:	[mii interface function]
9995  *
9996  *	Write a PHY register on the GMII (i82543 version).
9997  */
9998 static void
9999 wm_gmii_i82543_writereg(device_t dev, int phy, int reg, int val)
10000 {
10001 	struct wm_softc *sc = device_private(dev);
10002 
10003 	wm_i82543_mii_sendbits(sc, 0xffffffffU, 32);
10004 	wm_i82543_mii_sendbits(sc, val | (MII_COMMAND_ACK << 16) |
10005 	    (reg << 18) | (phy << 23) | (MII_COMMAND_WRITE << 28) |
10006 	    (MII_COMMAND_START << 30), 32);
10007 }
10008 
10009 /*
10010  * wm_gmii_mdic_readreg:	[mii interface function]
10011  *
10012  *	Read a PHY register on the GMII.
10013  */
10014 static int
10015 wm_gmii_mdic_readreg(device_t dev, int phy, int reg)
10016 {
10017 	struct wm_softc *sc = device_private(dev);
10018 	uint32_t mdic = 0;
10019 	int i, rv;
10020 
10021 	if (reg > MII_ADDRMASK) {
10022 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10023 		    __func__, sc->sc_phytype, reg);
10024 		reg &= MII_ADDRMASK;
10025 	}
10026 
10027 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_READ | MDIC_PHYADD(phy) |
10028 	    MDIC_REGADD(reg));
10029 
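	/*
	 * Poll for MDIC_READY; each iteration delays 50us, so the total
	 * wait is bounded by WM_GEN_POLL_TIMEOUT * 3 * 50us.
	 */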
10030 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10031 		mdic = CSR_READ(sc, WMREG_MDIC);
10032 		if (mdic & MDIC_READY)
10033 			break;
10034 		delay(50);
10035 	}
10036 
10037 	if ((mdic & MDIC_READY) == 0) {
10038 		log(LOG_WARNING, "%s: MDIC read timed out: phy %d reg %d\n",
10039 		    device_xname(dev), phy, reg);
10040 		rv = 0;
10041 	} else if (mdic & MDIC_E) {
10042 #if 0 /* This is normal if no PHY is present. */
10043 		log(LOG_WARNING, "%s: MDIC read error: phy %d reg %d\n",
10044 		    device_xname(dev), phy, reg);
10045 #endif
10046 		rv = 0;
10047 	} else {
10048 		rv = MDIC_DATA(mdic);
10049 		if (rv == 0xffff)
10050 			rv = 0;
10051 	}
10052 
10053 	return rv;
10054 }
10055 
10056 /*
10057  * wm_gmii_mdic_writereg:	[mii interface function]
10058  *
10059  *	Write a PHY register on the GMII.
10060  */
10061 static void
10062 wm_gmii_mdic_writereg(device_t dev, int phy, int reg, int val)
10063 {
10064 	struct wm_softc *sc = device_private(dev);
10065 	uint32_t mdic = 0;
10066 	int i;
10067 
10068 	if (reg > MII_ADDRMASK) {
10069 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10070 		    __func__, sc->sc_phytype, reg);
10071 		reg &= MII_ADDRMASK;
10072 	}
10073 
10074 	CSR_WRITE(sc, WMREG_MDIC, MDIC_OP_WRITE | MDIC_PHYADD(phy) |
10075 	    MDIC_REGADD(reg) | MDIC_DATA(val));
10076 
10077 	for (i = 0; i < WM_GEN_POLL_TIMEOUT * 3; i++) {
10078 		mdic = CSR_READ(sc, WMREG_MDIC);
10079 		if (mdic & MDIC_READY)
10080 			break;
10081 		delay(50);
10082 	}
10083 
10084 	if ((mdic & MDIC_READY) == 0)
10085 		log(LOG_WARNING, "%s: MDIC write timed out: phy %d reg %d\n",
10086 		    device_xname(dev), phy, reg);
10087 	else if (mdic & MDIC_E)
10088 		log(LOG_WARNING, "%s: MDIC write error: phy %d reg %d\n",
10089 		    device_xname(dev), phy, reg);
10090 }
10091 
10092 /*
10093  * wm_gmii_i82544_readreg:	[mii interface function]
10094  *
10095  *	Read a PHY register on the GMII.
10096  */
10097 static int
10098 wm_gmii_i82544_readreg(device_t dev, int phy, int reg)
10099 {
10100 	struct wm_softc *sc = device_private(dev);
10101 	int rv;
10102 
10103 	if (sc->phy.acquire(sc)) {
10104 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10105 		return 0;
10106 	}
10107 
10108 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10109 		switch (sc->sc_phytype) {
10110 		case WMPHY_IGP:
10111 		case WMPHY_IGP_2:
10112 		case WMPHY_IGP_3:
10113 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
10114 			    reg);
10115 			break;
10116 		default:
10117 #ifdef WM_DEBUG
10118 			device_printf(dev, "%s: PHYTYPE = 0x%x, addr = %02x\n",
10119 			    __func__, sc->sc_phytype, reg);
10120 #endif
10121 			break;
10122 		}
10123 	}
10124 
10125 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10126 	sc->phy.release(sc);
10127 
10128 	return rv;
10129 }
10130 
10131 /*
10132  * wm_gmii_i82544_writereg:	[mii interface function]
10133  *
10134  *	Write a PHY register on the GMII.
10135  */
10136 static void
10137 wm_gmii_i82544_writereg(device_t dev, int phy, int reg, int val)
10138 {
10139 	struct wm_softc *sc = device_private(dev);
10140 
10141 	if (sc->phy.acquire(sc)) {
10142 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10143 		return;
10144 	}
10145 
10146 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10147 		switch (sc->sc_phytype) {
10148 		case WMPHY_IGP:
10149 		case WMPHY_IGP_2:
10150 		case WMPHY_IGP_3:
10151 			wm_gmii_mdic_writereg(dev, phy, MII_IGPHY_PAGE_SELECT,
10152 			    reg);
10153 			break;
10154 		default:
10155 #ifdef WM_DEBUG
10156 			device_printf(dev, "%s: PHYTYPE == 0x%x, addr = %02x",
10157 			    __func__, sc->sc_phytype, reg);
10158 #endif
10159 			break;
10160 		}
10161 	}
10162 
10163 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10164 	sc->phy.release(sc);
10165 }
10166 
10167 /*
10168  * wm_gmii_i80003_readreg:	[mii interface function]
10169  *
10170  *	Read a PHY register on the kumeran bus.
10171  * This could be handled by the PHY layer if we didn't have to lock the
10172  * resource ...
10173  */
10174 static int
10175 wm_gmii_i80003_readreg(device_t dev, int phy, int reg)
10176 {
10177 	struct wm_softc *sc = device_private(dev);
10178 	int page_select, temp;
10179 	int rv;
10180 
10181 	if (phy != 1) /* only one PHY on kumeran bus */
10182 		return 0;
10183 
10184 	if (sc->phy.acquire(sc)) {
10185 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10186 		return 0;
10187 	}
10188 
10189 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10190 		page_select = GG82563_PHY_PAGE_SELECT;
10191 	else {
10192 		/*
10193 		 * Use Alternative Page Select register to access registers
10194 		 * 30 and 31.
10195 		 */
10196 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10197 	}
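	/*
	 * The page number lives in the upper bits of 'reg'; only the low
	 * bits (reg & MII_ADDRMASK) address a register within the page.
	 */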
10198 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10199 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
10200 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10201 		/*
10202 		 * Wait another 200us to work around a bug with the ready
10203 		 * bit in the MDIC register.
10204 		 */
10205 		delay(200);
10206 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
10207 			device_printf(dev, "%s failed\n", __func__);
10208 			rv = 0; /* XXX */
10209 			goto out;
10210 		}
10211 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10212 		delay(200);
10213 	} else
10214 		rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10215 
10216 out:
10217 	sc->phy.release(sc);
10218 	return rv;
10219 }
10220 
10221 /*
10222  * wm_gmii_i80003_writereg:	[mii interface function]
10223  *
10224  *	Write a PHY register on the kumeran bus.
10225  * This could be handled by the PHY layer if we didn't have to lock the
10226  * resource ...
10227  */
10228 static void
10229 wm_gmii_i80003_writereg(device_t dev, int phy, int reg, int val)
10230 {
10231 	struct wm_softc *sc = device_private(dev);
10232 	int page_select, temp;
10233 
10234 	if (phy != 1) /* only one PHY on kumeran bus */
10235 		return;
10236 
10237 	if (sc->phy.acquire(sc)) {
10238 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10239 		return;
10240 	}
10241 
10242 	if ((reg & MII_ADDRMASK) < GG82563_MIN_ALT_REG)
10243 		page_select = GG82563_PHY_PAGE_SELECT;
10244 	else {
10245 		/*
10246 		 * Use Alternative Page Select register to access registers
10247 		 * 30 and 31.
10248 		 */
10249 		page_select = GG82563_PHY_PAGE_SELECT_ALT;
10250 	}
10251 	temp = (uint16_t)reg >> GG82563_PAGE_SHIFT;
10252 	wm_gmii_mdic_writereg(dev, phy, page_select, temp);
10253 	if ((sc->sc_flags & WM_F_80003_MDIC_WA) != 0) {
10254 		/*
10255 		 * Wait another 200us to work around a bug with the ready
10256 		 * bit in the MDIC register.
10257 		 */
10258 		delay(200);
10259 		if (wm_gmii_mdic_readreg(dev, phy, page_select) != temp) {
10260 			device_printf(dev, "%s failed\n", __func__);
10261 			goto out;
10262 		}
10263 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10264 		delay(200);
10265 	} else
10266 		wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10267 
10268 out:
10269 	sc->phy.release(sc);
10270 }
10271 
10272 /*
10273  * wm_gmii_bm_readreg:	[mii interface function]
10274  *
10275  *	Read a PHY register on the BM PHY.
10276  * This could be handled by the PHY layer if we didn't have to lock the
10277  * resource ...
10278  */
10279 static int
10280 wm_gmii_bm_readreg(device_t dev, int phy, int reg)
10281 {
10282 	struct wm_softc *sc = device_private(dev);
10283 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10284 	uint16_t val;
10285 	int rv;
10286 
10287 	if (sc->phy.acquire(sc)) {
10288 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10289 		return 0;
10290 	}
10291 
10292 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10293 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10294 		    || (reg == 31)) ? 1 : phy;
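	/*
	 * Accesses to pages >= 768, to page 0 register 25 and to register
	 * 31 always go through PHY address 1 on these parts.
	 */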
10295 	/* Page 800 works differently than the rest so it has its own func */
10296 	if (page == BM_WUC_PAGE) {
10297 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
10298 		rv = val;
10299 		goto release;
10300 	}
10301 
10302 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10303 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10304 		    && (sc->sc_type != WM_T_82583))
10305 			wm_gmii_mdic_writereg(dev, phy,
10306 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10307 		else
10308 			wm_gmii_mdic_writereg(dev, phy,
10309 			    BME1000_PHY_PAGE_SELECT, page);
10310 	}
10311 
10312 	rv = wm_gmii_mdic_readreg(dev, phy, reg & MII_ADDRMASK);
10313 
10314 release:
10315 	sc->phy.release(sc);
10316 	return rv;
10317 }
10318 
10319 /*
10320  * wm_gmii_bm_writereg:	[mii interface function]
10321  *
10322  *	Write a PHY register on the BM PHY.
10323  * This could be handled by the PHY layer if we didn't have to lock the
10324  * resource ...
10325  */
10326 static void
10327 wm_gmii_bm_writereg(device_t dev, int phy, int reg, int val)
10328 {
10329 	struct wm_softc *sc = device_private(dev);
10330 	uint16_t page = reg >> BME1000_PAGE_SHIFT;
10331 
10332 	if (sc->phy.acquire(sc)) {
10333 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10334 		return;
10335 	}
10336 
10337 	if ((sc->sc_type != WM_T_82574) && (sc->sc_type != WM_T_82583))
10338 		phy = ((page >= 768) || ((page == 0) && (reg == 25))
10339 		    || (reg == 31)) ? 1 : phy;
10340 	/* Page 800 works differently than the rest so it has its own func */
10341 	if (page == BM_WUC_PAGE) {
10342 		uint16_t tmp;
10343 
10344 		tmp = val;
10345 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
10346 		goto release;
10347 	}
10348 
10349 	if (reg > BME1000_MAX_MULTI_PAGE_REG) {
10350 		if ((phy == 1) && (sc->sc_type != WM_T_82574)
10351 		    && (sc->sc_type != WM_T_82583))
10352 			wm_gmii_mdic_writereg(dev, phy,
10353 			    MII_IGPHY_PAGE_SELECT, page << BME1000_PAGE_SHIFT);
10354 		else
10355 			wm_gmii_mdic_writereg(dev, phy,
10356 			    BME1000_PHY_PAGE_SELECT, page);
10357 	}
10358 
10359 	wm_gmii_mdic_writereg(dev, phy, reg & MII_ADDRMASK, val);
10360 
10361 release:
10362 	sc->phy.release(sc);
10363 }
10364 
10365 static void
10366 wm_access_phy_wakeup_reg_bm(device_t dev, int offset, int16_t *val, int rd)
10367 {
10368 	struct wm_softc *sc = device_private(dev);
10369 	uint16_t regnum = BM_PHY_REG_NUM(offset);
10370 	uint16_t wuce, reg;
10371 
10372 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10373 		device_xname(dev), __func__));
10374 	/* XXX Gig must be disabled for MDIO accesses to page 800 */
10375 	if (sc->sc_type == WM_T_PCH) {
10376 		/* XXX the e1000 driver does nothing here... why? */
10377 	}
10378 
10379 	/*
10380 	 * 1) Enable PHY wakeup register first.
10381 	 * See e1000_enable_phy_wakeup_reg_access_bm().
10382 	 */
10383 
10384 	/* Set page 769 */
10385 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10386 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
10387 
10388 	/* Read WUCE and save it */
10389 	wuce = wm_gmii_mdic_readreg(dev, 1, BM_WUC_ENABLE_REG);
10390 
10391 	reg = wuce | BM_WUC_ENABLE_BIT;
10392 	reg &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
10393 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, reg);
10394 
10395 	/* Select page 800 */
10396 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10397 	    BM_WUC_PAGE << BME1000_PAGE_SHIFT);
10398 
10399 	/*
10400 	 * 2) Access PHY wakeup register.
10401 	 * See e1000_access_phy_wakeup_reg_bm.
10402 	 */
10403 
10404 	/* Write page 800 */
10405 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ADDRESS_OPCODE, regnum);
10406 
10407 	if (rd)
10408 		*val = wm_gmii_mdic_readreg(dev, 1, BM_WUC_DATA_OPCODE);
10409 	else
10410 		wm_gmii_mdic_writereg(dev, 1, BM_WUC_DATA_OPCODE, *val);
10411 
10412 	/*
10413 	 * 3) Disable PHY wakeup register.
10414 	 * See e1000_disable_phy_wakeup_reg_access_bm().
10415 	 */
10416 	/* Set page 769 */
10417 	wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10418 	    BM_WUC_ENABLE_PAGE << BME1000_PAGE_SHIFT);
10419 
10420 	wm_gmii_mdic_writereg(dev, 1, BM_WUC_ENABLE_REG, wuce);
10421 }
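
/*
 * A hedged usage sketch for the helper above: callers hand it a register
 * in the BM_WUC_PAGE encoding; rd != 0 reads into *val, rd == 0 writes
 * *val:
 *
 *	uint16_t v = 0;
 *	wm_access_phy_wakeup_reg_bm(dev, reg, &v, 1);	(read)
 *	wm_access_phy_wakeup_reg_bm(dev, reg, &v, 0);	(write)
 */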
10422 
10423 /*
10424  * wm_gmii_hv_readreg:	[mii interface function]
10425  *
10426  *	Read a PHY register on the HV (PCH) PHY.
10427  * This could be handled by the PHY layer if we didn't have to lock the
10428  * resource ...
10429  */
10430 static int
10431 wm_gmii_hv_readreg(device_t dev, int phy, int reg)
10432 {
10433 	struct wm_softc *sc = device_private(dev);
10434 	int rv;
10435 
10436 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10437 		device_xname(dev), __func__));
10438 	if (sc->phy.acquire(sc)) {
10439 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10440 		return 0;
10441 	}
10442 
10443 	rv = wm_gmii_hv_readreg_locked(dev, phy, reg);
10444 	sc->phy.release(sc);
10445 	return rv;
10446 }
10447 
10448 static int
10449 wm_gmii_hv_readreg_locked(device_t dev, int phy, int reg)
10450 {
10451 	uint16_t page = BM_PHY_REG_PAGE(reg);
10452 	uint16_t regnum = BM_PHY_REG_NUM(reg);
10453 	uint16_t val;
10454 	int rv;
10455 
10456 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
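	/*
	 * 'reg' is a synthetic register number: BM_PHY_REG_PAGE() extracts
	 * the page and BM_PHY_REG_NUM() the register within that page.
	 */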
10457 
10458 	/* Page 800 works differently than the rest so it has its own func */
10459 	if (page == BM_WUC_PAGE) {
10460 		wm_access_phy_wakeup_reg_bm(dev, reg, &val, 1);
10461 		return val;
10462 	}
10463 
10464 	/*
10465 	 * Pages lower than 768 work differently than the rest, so they
10466 	 * have their own function.
10467 	 */
10468 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
10469 		printf("gmii_hv_readreg!!!\n");
10470 		return 0;
10471 	}
10472 
10473 	/*
10474 	 * XXX I21[789] documents say that the SMBus Address register is at
10475 	 * PHY address 01, Page 0 (not 768), Register 26.
10476 	 */
10477 	if (page == HV_INTC_FC_PAGE_START)
10478 		page = 0;
10479 
10480 	if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
10481 		wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10482 		    page << BME1000_PAGE_SHIFT);
10483 	}
10484 
10485 	rv = wm_gmii_mdic_readreg(dev, phy, regnum & MII_ADDRMASK);
10486 	return rv;
10487 }
10488 
10489 /*
10490  * wm_gmii_hv_writereg:	[mii interface function]
10491  *
10492  *	Write a PHY register on the HV (PCH) PHY.
10493  * This could be handled by the PHY layer if we didn't have to lock the
10494  * resource ...
10495  */
10496 static void
10497 wm_gmii_hv_writereg(device_t dev, int phy, int reg, int val)
10498 {
10499 	struct wm_softc *sc = device_private(dev);
10500 
10501 	DPRINTF(WM_DEBUG_GMII, ("%s: %s called\n",
10502 		device_xname(dev), __func__));
10503 
10504 	if (sc->phy.acquire(sc)) {
10505 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10506 		return;
10507 	}
10508 
10509 	wm_gmii_hv_writereg_locked(dev, phy, reg, val);
10510 	sc->phy.release(sc);
10511 }
10512 
10513 static void
10514 wm_gmii_hv_writereg_locked(device_t dev, int phy, int reg, int val)
10515 {
10516 	struct wm_softc *sc = device_private(dev);
10517 	uint16_t page = BM_PHY_REG_PAGE(reg);
10518 	uint16_t regnum = BM_PHY_REG_NUM(reg);
10519 
10520 	phy = (page >= HV_INTC_FC_PAGE_START) ? 1 : phy;
10521 
10522 	/* Page 800 works differently than the rest so it has its own func */
10523 	if (page == BM_WUC_PAGE) {
10524 		uint16_t tmp;
10525 
10526 		tmp = val;
10527 		wm_access_phy_wakeup_reg_bm(dev, reg, &tmp, 0);
10528 		return;
10529 	}
10530 
10531 	/*
10532 	 * Pages lower than 768 work differently than the rest, so they'd
10533 	 * need their own function (not implemented here)
10534 	 */
10535 	if ((page > 0) && (page < HV_INTC_FC_PAGE_START)) {
10536 		printf("gmii_hv_writereg!!!\n");
10537 		return;
10538 	}
10539 
10540 	{
10541 		/*
10542 		 * XXX I21[789] documents say that the SMBus Address register
10543 		 * is at PHY address 01, Page 0 (not 768), Register 26.
10544 		 */
10545 		if (page == HV_INTC_FC_PAGE_START)
10546 			page = 0;
10547 
10548 		/*
10549 		 * XXX Workaround MDIO accesses being disabled after entering
10550 		 * IEEE Power Down (whenever bit 11 of the PHY control
10551 		 * register is set)
10552 		 */
10553 		if (sc->sc_phytype == WMPHY_82578) {
10554 			struct mii_softc *child;
10555 
10556 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
10557 			if ((child != NULL) && (child->mii_mpd_rev >= 1)
10558 			    && (phy == 2) && ((regnum & MII_ADDRMASK) == 0)
10559 			    && ((val & (1 << 11)) != 0)) {
10560 				printf("XXX need workaround\n");
10561 			}
10562 		}
10563 
10564 		if (regnum > BME1000_MAX_MULTI_PAGE_REG) {
10565 			wm_gmii_mdic_writereg(dev, 1, MII_IGPHY_PAGE_SELECT,
10566 			    page << BME1000_PAGE_SHIFT);
10567 		}
10568 	}
10569 
10570 	wm_gmii_mdic_writereg(dev, phy, regnum & MII_ADDRMASK, val);
10571 }
10572 
10573 /*
10574  * wm_gmii_82580_readreg:	[mii interface function]
10575  *
10576  *	Read a PHY register on the 82580 and I350.
10577  * This could be handled by the PHY layer if we didn't have to lock the
10578  * resource ...
10579  */
10580 static int
10581 wm_gmii_82580_readreg(device_t dev, int phy, int reg)
10582 {
10583 	struct wm_softc *sc = device_private(dev);
10584 	int rv;
10585 
10586 	if (sc->phy.acquire(sc) != 0) {
10587 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10588 		return 0;
10589 	}
10590 
10591 #ifdef DIAGNOSTIC
10592 	if (reg > MII_ADDRMASK) {
10593 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10594 		    __func__, sc->sc_phytype, reg);
10595 		reg &= MII_ADDRMASK;
10596 	}
10597 #endif
10598 	rv = wm_gmii_mdic_readreg(dev, phy, reg);
10599 
10600 	sc->phy.release(sc);
10601 	return rv;
10602 }
10603 
10604 /*
10605  * wm_gmii_82580_writereg:	[mii interface function]
10606  *
10607  *	Write a PHY register on the 82580 and I350.
10608  * This could be handled by the PHY layer if we didn't have to lock the
10609  * resource ...
10610  */
10611 static void
10612 wm_gmii_82580_writereg(device_t dev, int phy, int reg, int val)
10613 {
10614 	struct wm_softc *sc = device_private(dev);
10615 
10616 	if (sc->phy.acquire(sc) != 0) {
10617 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10618 		return;
10619 	}
10620 
10621 #ifdef DIAGNOSTIC
10622 	if (reg > MII_ADDRMASK) {
10623 		device_printf(dev, "%s: PHYTYPE = %d, addr 0x%x > 0x1f\n",
10624 		    __func__, sc->sc_phytype, reg);
10625 		reg &= MII_ADDRMASK;
10626 	}
10627 #endif
10628 	wm_gmii_mdic_writereg(dev, phy, reg, val);
10629 
10630 	sc->phy.release(sc);
10631 }
10632 
10633 /*
10634  * wm_gmii_gs40g_readreg:	[mii interface function]
10635  *
10636  *	Read a PHY register on the I210 and I211.
10637  * This could be handled by the PHY layer if we didn't have to lock the
10638  * resource ...
10639  */
10640 static int
10641 wm_gmii_gs40g_readreg(device_t dev, int phy, int reg)
10642 {
10643 	struct wm_softc *sc = device_private(dev);
10644 	int page, offset;
10645 	int rv;
10646 
10647 	/* Acquire semaphore */
10648 	if (sc->phy.acquire(sc)) {
10649 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10650 		return 0;
10651 	}
10652 
10653 	/* Page select */
10654 	page = reg >> GS40G_PAGE_SHIFT;
10655 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
10656 
10657 	/* Read reg */
10658 	offset = reg & GS40G_OFFSET_MASK;
10659 	rv = wm_gmii_mdic_readreg(dev, phy, offset);
10660 
10661 	sc->phy.release(sc);
10662 	return rv;
10663 }
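
/*
 * A hedged sketch of the GS40G register encoding used above: the page
 * number lives above GS40G_PAGE_SHIFT and the in-page offset below it.
 * Assuming a shift of 16 and an offset mask of 0xffff (as in other
 * e1000-derived code), a caller asking for page 2, register 21 would
 * pass reg = (2 << 16) | 21, which the functions here split back into
 * page = 2 for GS40G_PAGE_SELECT and offset = 21 for the MDIC access.
 */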
10664 
10665 /*
10666  * wm_gmii_gs40g_writereg:	[mii interface function]
10667  *
10668  *	Write a PHY register on the I210 and I211.
10669  * This could be handled by the PHY layer if we didn't have to lock the
10670  * resource ...
10671  */
10672 static void
10673 wm_gmii_gs40g_writereg(device_t dev, int phy, int reg, int val)
10674 {
10675 	struct wm_softc *sc = device_private(dev);
10676 	int page, offset;
10677 
10678 	/* Acquire semaphore */
10679 	if (sc->phy.acquire(sc)) {
10680 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10681 		return;
10682 	}
10683 
10684 	/* Page select */
10685 	page = reg >> GS40G_PAGE_SHIFT;
10686 	wm_gmii_mdic_writereg(dev, phy, GS40G_PAGE_SELECT, page);
10687 
10688 	/* Write reg */
10689 	offset = reg & GS40G_OFFSET_MASK;
10690 	wm_gmii_mdic_writereg(dev, phy, offset, val);
10691 
10692 	/* Release semaphore */
10693 	sc->phy.release(sc);
10694 }
10695 
10696 /*
10697  * wm_gmii_statchg:	[mii interface function]
10698  *
10699  *	Callback from MII layer when media changes.
10700  */
10701 static void
10702 wm_gmii_statchg(struct ifnet *ifp)
10703 {
10704 	struct wm_softc *sc = ifp->if_softc;
10705 	struct mii_data *mii = &sc->sc_mii;
10706 
10707 	sc->sc_ctrl &= ~(CTRL_TFCE | CTRL_RFCE);
10708 	sc->sc_tctl &= ~TCTL_COLD(0x3ff);
10709 	sc->sc_fcrtl &= ~FCRTL_XONE;
10710 
10711 	/*
10712 	 * Get flow control negotiation result.
10713 	 */
10714 	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
10715 	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
10716 		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
10717 		mii->mii_media_active &= ~IFM_ETH_FMASK;
10718 	}
10719 
10720 	if (sc->sc_flowflags & IFM_FLOW) {
10721 		if (sc->sc_flowflags & IFM_ETH_TXPAUSE) {
10722 			sc->sc_ctrl |= CTRL_TFCE;
10723 			sc->sc_fcrtl |= FCRTL_XONE;
10724 		}
10725 		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
10726 			sc->sc_ctrl |= CTRL_RFCE;
10727 	}
10728 
10729 	if (sc->sc_mii.mii_media_active & IFM_FDX) {
10730 		DPRINTF(WM_DEBUG_LINK,
10731 		    ("%s: LINK: statchg: FDX\n", ifp->if_xname));
10732 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
10733 	} else {
10734 		DPRINTF(WM_DEBUG_LINK,
10735 		    ("%s: LINK: statchg: HDX\n", ifp->if_xname));
10736 		sc->sc_tctl |= TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
10737 	}
10738 
10739 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
10740 	CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
10741 	CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ? WMREG_OLD_FCRTL
10742 						 : WMREG_FCRTL, sc->sc_fcrtl);
10743 	if (sc->sc_type == WM_T_80003) {
10744 		switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
10745 		case IFM_1000_T:
10746 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10747 			    KUMCTRLSTA_HD_CTRL_1000_DEFAULT);
10748 			sc->sc_tipg = TIPG_1000T_80003_DFLT;
10749 			break;
10750 		default:
10751 			wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_HD_CTRL,
10752 			    KUMCTRLSTA_HD_CTRL_10_100_DEFAULT);
10753 			sc->sc_tipg = TIPG_10_100_80003_DFLT;
10754 			break;
10755 		}
10756 		CSR_WRITE(sc, WMREG_TIPG, sc->sc_tipg);
10757 	}
10758 }
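
/*
 * Worked example of the flow-control plumbing above (a sketch, not
 * additional logic): if autonegotiation resolved to symmetric pause,
 * sc_flowflags holds IFM_FLOW | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE, so
 * the code sets CTRL_TFCE and FCRTL_XONE for the transmit direction
 * and CTRL_RFCE for the receive direction before writing CTRL, TCTL
 * and FCRTL back to the hardware.
 */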
10759 
10760 /* kumeran related (80003, ICH* and PCH*) */
10761 
10762 /*
10763  * wm_kmrn_readreg:
10764  *
10765  *	Read a kumeran register
10766  */
10767 static int
10768 wm_kmrn_readreg(struct wm_softc *sc, int reg, uint16_t *val)
10769 {
10770 	int rv;
10771 
10772 	if (sc->sc_type == WM_T_80003)
10773 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10774 	else
10775 		rv = sc->phy.acquire(sc);
10776 	if (rv != 0) {
10777 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
10778 		    __func__);
10779 		return rv;
10780 	}
10781 
10782 	rv = wm_kmrn_readreg_locked(sc, reg, val);
10783 
10784 	if (sc->sc_type == WM_T_80003)
10785 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10786 	else
10787 		sc->phy.release(sc);
10788 
10789 	return rv;
10790 }
10791 
10792 static int
10793 wm_kmrn_readreg_locked(struct wm_softc *sc, int reg, uint16_t *val)
10794 {
10795 
10796 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10797 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) |
10798 	    KUMCTRLSTA_REN);
10799 	CSR_WRITE_FLUSH(sc);
10800 	delay(2);
10801 
10802 	*val = CSR_READ(sc, WMREG_KUMCTRLSTA) & KUMCTRLSTA_MASK;
10803 
10804 	return 0;
10805 }
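
/*
 * Hypothetical usage sketch for the kumeran accessors (the offset
 * below is just one of the KUMCTRLSTA_OFFSET_* constants used
 * elsewhere in this file):
 *
 *	uint16_t hd_ctrl;
 *
 *	if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_HD_CTRL, &hd_ctrl) == 0)
 *		... hd_ctrl now holds the 16-bit kumeran register value ...
 */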
10806 
10807 /*
10808  * wm_kmrn_writereg:
10809  *
10810  *	Write a kumeran register
10811  */
10812 static int
10813 wm_kmrn_writereg(struct wm_softc *sc, int reg, uint16_t val)
10814 {
10815 	int rv;
10816 
10817 	if (sc->sc_type == WM_T_80003)
10818 		rv = wm_get_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10819 	else
10820 		rv = sc->phy.acquire(sc);
10821 	if (rv != 0) {
10822 		device_printf(sc->sc_dev, "%s: failed to get semaphore\n",
10823 		    __func__);
10824 		return rv;
10825 	}
10826 
10827 	rv = wm_kmrn_writereg_locked(sc, reg, val);
10828 
10829 	if (sc->sc_type == WM_T_80003)
10830 		wm_put_swfw_semaphore(sc, SWFW_MAC_CSR_SM);
10831 	else
10832 		sc->phy.release(sc);
10833 
10834 	return rv;
10835 }
10836 
10837 static int
10838 wm_kmrn_writereg_locked(struct wm_softc *sc, int reg, uint16_t val)
10839 {
10840 
10841 	CSR_WRITE(sc, WMREG_KUMCTRLSTA,
10842 	    ((reg << KUMCTRLSTA_OFFSET_SHIFT) & KUMCTRLSTA_OFFSET) | val);
10843 
10844 	return 0;
10845 }
10846 
10847 /* SGMII related */
10848 
10849 /*
10850  * wm_sgmii_uses_mdio
10851  *
10852  * Check whether the transaction is to the internal PHY or the external
10853  * MDIO interface. Return true if it's MDIO.
10854  */
10855 static bool
10856 wm_sgmii_uses_mdio(struct wm_softc *sc)
10857 {
10858 	uint32_t reg;
10859 	bool ismdio = false;
10860 
10861 	switch (sc->sc_type) {
10862 	case WM_T_82575:
10863 	case WM_T_82576:
10864 		reg = CSR_READ(sc, WMREG_MDIC);
10865 		ismdio = ((reg & MDIC_DEST) != 0);
10866 		break;
10867 	case WM_T_82580:
10868 	case WM_T_I350:
10869 	case WM_T_I354:
10870 	case WM_T_I210:
10871 	case WM_T_I211:
10872 		reg = CSR_READ(sc, WMREG_MDICNFG);
10873 		ismdio = ((reg & MDICNFG_DEST) != 0);
10874 		break;
10875 	default:
10876 		break;
10877 	}
10878 
10879 	return ismdio;
10880 }
10881 
10882 /*
10883  * wm_sgmii_readreg:	[mii interface function]
10884  *
10885  *	Read a PHY register on the SGMII
10886  * This could be handled by the PHY layer if we didn't have to lock the
10887  * resource ...
10888  */
10889 static int
10890 wm_sgmii_readreg(device_t dev, int phy, int reg)
10891 {
10892 	struct wm_softc *sc = device_private(dev);
10893 	uint32_t i2ccmd;
10894 	int i, rv;
10895 
10896 	if (sc->phy.acquire(sc)) {
10897 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10898 		return 0;
10899 	}
10900 
10901 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10902 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
10903 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10904 
10905 	/* Poll the ready bit */
10906 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10907 		delay(50);
10908 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10909 		if (i2ccmd & I2CCMD_READY)
10910 			break;
10911 	}
10912 	if ((i2ccmd & I2CCMD_READY) == 0)
10913 		device_printf(dev, "I2CCMD Read did not complete\n");
10914 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10915 		device_printf(dev, "I2CCMD Error bit set\n");
10916 
10917 	rv = ((i2ccmd >> 8) & 0x00ff) | ((i2ccmd << 8) & 0xff00);
10918 
10919 	sc->phy.release(sc);
10920 	return rv;
10921 }
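
/*
 * Worked example of the byte swap above: the I2CCMD data field is
 * byte-swapped with respect to the PHY register, so if the hardware
 * returns 0x3412 in the low 16 bits of I2CCMD, the swap
 * ((x >> 8) & 0x00ff) | ((x << 8) & 0xff00) yields the register value
 * 0x1234.  wm_sgmii_writereg() below applies the inverse swap before
 * issuing the write.
 */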
10922 
10923 /*
10924  * wm_sgmii_writereg:	[mii interface function]
10925  *
10926  *	Write a PHY register on the SGMII.
10927  * This could be handled by the PHY layer if we didn't have to lock the
10928  * resource ...
10929  */
10930 static void
10931 wm_sgmii_writereg(device_t dev, int phy, int reg, int val)
10932 {
10933 	struct wm_softc *sc = device_private(dev);
10934 	uint32_t i2ccmd;
10935 	int i;
10936 	int swapdata;
10937 
10938 	if (sc->phy.acquire(sc) != 0) {
10939 		device_printf(dev, "%s: failed to get semaphore\n", __func__);
10940 		return;
10941 	}
10942 	/* Swap the data bytes for the I2C interface */
10943 	swapdata = ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00);
10944 	i2ccmd = (reg << I2CCMD_REG_ADDR_SHIFT)
10945 	    | (phy << I2CCMD_PHY_ADDR_SHIFT) | I2CCMD_OPCODE_WRITE | swapdata;
10946 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
10947 
10948 	/* Poll the ready bit */
10949 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
10950 		delay(50);
10951 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
10952 		if (i2ccmd & I2CCMD_READY)
10953 			break;
10954 	}
10955 	if ((i2ccmd & I2CCMD_READY) == 0)
10956 		device_printf(dev, "I2CCMD Write did not complete\n");
10957 	if ((i2ccmd & I2CCMD_ERROR) != 0)
10958 		device_printf(dev, "I2CCMD Error bit set\n");
10959 
10960 	sc->phy.release(sc);
10961 }
10962 
10963 /* TBI related */
10964 
10965 /*
10966  * wm_tbi_mediainit:
10967  *
10968  *	Initialize media for use on 1000BASE-X devices.
10969  */
10970 static void
10971 wm_tbi_mediainit(struct wm_softc *sc)
10972 {
10973 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
10974 	const char *sep = "";
10975 
10976 	if (sc->sc_type < WM_T_82543)
10977 		sc->sc_tipg = TIPG_WM_DFLT;
10978 	else
10979 		sc->sc_tipg = TIPG_LG_DFLT;
10980 
10981 	sc->sc_tbi_serdes_anegticks = 5;
10982 
10983 	/* Initialize our media structures */
10984 	sc->sc_mii.mii_ifp = ifp;
10985 	sc->sc_ethercom.ec_mii = &sc->sc_mii;
10986 
10987 	if ((sc->sc_type >= WM_T_82575)
10988 	    && (sc->sc_mediatype == WM_MEDIATYPE_SERDES))
10989 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10990 		    wm_serdes_mediachange, wm_serdes_mediastatus);
10991 	else
10992 		ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK,
10993 		    wm_tbi_mediachange, wm_tbi_mediastatus);
10994 
10995 	/*
10996 	 * SWD Pins:
10997 	 *
10998 	 *	0 = Link LED (output)
10999 	 *	1 = Loss Of Signal (input)
11000 	 */
11001 	sc->sc_ctrl |= CTRL_SWDPIO(0);
11002 
11003 	/* XXX Perhaps this is only for TBI */
11004 	if (sc->sc_mediatype != WM_MEDIATYPE_SERDES)
11005 		sc->sc_ctrl &= ~CTRL_SWDPIO(1);
11006 
11007 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES)
11008 		sc->sc_ctrl &= ~CTRL_LRST;
11009 
11010 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11011 
11012 #define	ADD(ss, mm, dd)							\
11013 do {									\
11014 	aprint_normal("%s%s", sep, ss);					\
11015 	ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER | (mm), (dd), NULL); \
11016 	sep = ", ";							\
11017 } while (/*CONSTCOND*/0)
11018 
11019 	aprint_normal_dev(sc->sc_dev, "");
11020 
11021 	if (sc->sc_type == WM_T_I354) {
11022 		uint32_t status;
11023 
11024 		status = CSR_READ(sc, WMREG_STATUS);
11025 		if (((status & STATUS_2P5_SKU) != 0)
11026 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
11027 			ADD("2500baseKX-FDX", IFM_2500_KX | IFM_FDX,ANAR_X_FD);
11028 		} else
11029 			ADD("1000baseKX-FDX", IFM_1000_KX | IFM_FDX,ANAR_X_FD);
11030 	} else if (sc->sc_type == WM_T_82545) {
11031 		/* Only 82545 is LX (XXX except SFP) */
11032 		ADD("1000baseLX", IFM_1000_LX, ANAR_X_HD);
11033 		ADD("1000baseLX-FDX", IFM_1000_LX | IFM_FDX, ANAR_X_FD);
11034 	} else {
11035 		ADD("1000baseSX", IFM_1000_SX, ANAR_X_HD);
11036 		ADD("1000baseSX-FDX", IFM_1000_SX | IFM_FDX, ANAR_X_FD);
11037 	}
11038 	ADD("auto", IFM_AUTO, ANAR_X_FD | ANAR_X_HD);
11039 	aprint_normal("\n");
11040 
11041 #undef ADD
11042 
11043 	ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER | IFM_AUTO);
11044 }
11045 
11046 /*
11047  * wm_tbi_mediachange:	[ifmedia interface function]
11048  *
11049  *	Set hardware to newly-selected media on a 1000BASE-X device.
11050  */
11051 static int
11052 wm_tbi_mediachange(struct ifnet *ifp)
11053 {
11054 	struct wm_softc *sc = ifp->if_softc;
11055 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11056 	uint32_t status;
11057 	int i;
11058 
11059 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11060 		/* XXX need some work for >= 82571 and < 82575 */
11061 		if (sc->sc_type < WM_T_82575)
11062 			return 0;
11063 	}
11064 
11065 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
11066 	    || (sc->sc_type >= WM_T_82575))
11067 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
11068 
11069 	sc->sc_ctrl &= ~CTRL_LRST;
11070 	sc->sc_txcw = TXCW_ANE;
11071 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11072 		sc->sc_txcw |= TXCW_FD | TXCW_HD;
11073 	else if (ife->ifm_media & IFM_FDX)
11074 		sc->sc_txcw |= TXCW_FD;
11075 	else
11076 		sc->sc_txcw |= TXCW_HD;
11077 
11078 	if ((sc->sc_mii.mii_media.ifm_media & IFM_FLOW) != 0)
11079 		sc->sc_txcw |= TXCW_SYM_PAUSE | TXCW_ASYM_PAUSE;
11080 
11081 	DPRINTF(WM_DEBUG_LINK,("%s: sc_txcw = 0x%x after autoneg check\n",
11082 		device_xname(sc->sc_dev), sc->sc_txcw));
11083 	CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11084 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11085 	CSR_WRITE_FLUSH(sc);
11086 	delay(1000);
11087 
11088 	i = CSR_READ(sc, WMREG_CTRL) & CTRL_SWDPIN(1);
11089 	DPRINTF(WM_DEBUG_LINK,("%s: i = 0x%x\n", device_xname(sc->sc_dev),i));
11090 
11091 	/*
11092 	 * On chips newer than the 82544, the CTRL_SWDPIN(1) bit will be set
11093 	 * if the optics detect a signal, 0 if they don't.
11094 	 */
11095 	if (((i != 0) && (sc->sc_type > WM_T_82544)) || (i == 0)) {
11096 		/* Have signal; wait for the link to come up. */
11097 		for (i = 0; i < WM_LINKUP_TIMEOUT; i++) {
11098 			delay(10000);
11099 			if (CSR_READ(sc, WMREG_STATUS) & STATUS_LU)
11100 				break;
11101 		}
11102 
11103 		DPRINTF(WM_DEBUG_LINK,("%s: i = %d after waiting for link\n",
11104 			device_xname(sc->sc_dev),i));
11105 
11106 		status = CSR_READ(sc, WMREG_STATUS);
11107 		DPRINTF(WM_DEBUG_LINK,
11108 		    ("%s: status after final read = 0x%x, STATUS_LU = 0x%x\n",
11109 			device_xname(sc->sc_dev),status, STATUS_LU));
11110 		if (status & STATUS_LU) {
11111 			/* Link is up. */
11112 			DPRINTF(WM_DEBUG_LINK,
11113 			    ("%s: LINK: set media -> link up %s\n",
11114 				device_xname(sc->sc_dev),
11115 				(status & STATUS_FD) ? "FDX" : "HDX"));
11116 
11117 			/*
11118 			 * NOTE: The hardware updates TFCE and RFCE in CTRL
11119 			 * automatically, so we should refresh sc->sc_ctrl
11120 			 */
11121 			sc->sc_ctrl = CSR_READ(sc, WMREG_CTRL);
11122 			sc->sc_tctl &= ~TCTL_COLD(0x3ff);
11123 			sc->sc_fcrtl &= ~FCRTL_XONE;
11124 			if (status & STATUS_FD)
11125 				sc->sc_tctl |=
11126 				    TCTL_COLD(TX_COLLISION_DISTANCE_FDX);
11127 			else
11128 				sc->sc_tctl |=
11129 				    TCTL_COLD(TX_COLLISION_DISTANCE_HDX);
11130 			if (CSR_READ(sc, WMREG_CTRL) & CTRL_TFCE)
11131 				sc->sc_fcrtl |= FCRTL_XONE;
11132 			CSR_WRITE(sc, WMREG_TCTL, sc->sc_tctl);
11133 			CSR_WRITE(sc, (sc->sc_type < WM_T_82543) ?
11134 			    WMREG_OLD_FCRTL : WMREG_FCRTL, sc->sc_fcrtl);
11135 			sc->sc_tbi_linkup = 1;
11136 		} else {
11137 			if (i == WM_LINKUP_TIMEOUT)
11138 				wm_check_for_link(sc);
11139 			/* Link is down. */
11140 			DPRINTF(WM_DEBUG_LINK,
11141 			    ("%s: LINK: set media -> link down\n",
11142 				device_xname(sc->sc_dev)));
11143 			sc->sc_tbi_linkup = 0;
11144 		}
11145 	} else {
11146 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: set media -> no signal\n",
11147 			device_xname(sc->sc_dev)));
11148 		sc->sc_tbi_linkup = 0;
11149 	}
11150 
11151 	wm_tbi_serdes_set_linkled(sc);
11152 
11153 	return 0;
11154 }
11155 
11156 /*
11157  * wm_tbi_mediastatus:	[ifmedia interface function]
11158  *
11159  *	Get the current interface media status on a 1000BASE-X device.
11160  */
11161 static void
11162 wm_tbi_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11163 {
11164 	struct wm_softc *sc = ifp->if_softc;
11165 	uint32_t ctrl, status;
11166 
11167 	ifmr->ifm_status = IFM_AVALID;
11168 	ifmr->ifm_active = IFM_ETHER;
11169 
11170 	status = CSR_READ(sc, WMREG_STATUS);
11171 	if ((status & STATUS_LU) == 0) {
11172 		ifmr->ifm_active |= IFM_NONE;
11173 		return;
11174 	}
11175 
11176 	ifmr->ifm_status |= IFM_ACTIVE;
11177 	/* Only 82545 is LX */
11178 	if (sc->sc_type == WM_T_82545)
11179 		ifmr->ifm_active |= IFM_1000_LX;
11180 	else
11181 		ifmr->ifm_active |= IFM_1000_SX;
11182 	if (CSR_READ(sc, WMREG_STATUS) & STATUS_FD)
11183 		ifmr->ifm_active |= IFM_FDX;
11184 	else
11185 		ifmr->ifm_active |= IFM_HDX;
11186 	ctrl = CSR_READ(sc, WMREG_CTRL);
11187 	if (ctrl & CTRL_RFCE)
11188 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
11189 	if (ctrl & CTRL_TFCE)
11190 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
11191 }
11192 
11193 /* XXX TBI only */
11194 static int
11195 wm_check_for_link(struct wm_softc *sc)
11196 {
11197 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11198 	uint32_t rxcw;
11199 	uint32_t ctrl;
11200 	uint32_t status;
11201 	uint32_t sig;
11202 
11203 	if (sc->sc_mediatype == WM_MEDIATYPE_SERDES) {
11204 		/* XXX need some work for >= 82571 */
11205 		if (sc->sc_type >= WM_T_82571) {
11206 			sc->sc_tbi_linkup = 1;
11207 			return 0;
11208 		}
11209 	}
11210 
11211 	rxcw = CSR_READ(sc, WMREG_RXCW);
11212 	ctrl = CSR_READ(sc, WMREG_CTRL);
11213 	status = CSR_READ(sc, WMREG_STATUS);
11214 
11215 	sig = (sc->sc_type > WM_T_82544) ? CTRL_SWDPIN(1) : 0;
11216 
11217 	DPRINTF(WM_DEBUG_LINK,
11218 	    ("%s: %s: sig = %d, status_lu = %d, rxcw_c = %d\n",
11219 		device_xname(sc->sc_dev), __func__,
11220 		((ctrl & CTRL_SWDPIN(1)) == sig),
11221 		((status & STATUS_LU) != 0), ((rxcw & RXCW_C) != 0)));
11222 
11223 	/*
11224 	 * SWDPIN   LU RXCW
11225 	 *	0    0	  0
11226 	 *	0    0	  1	(should not happen)
11227 	 *	0    1	  0	(should not happen)
11228 	 *	0    1	  1	(should not happen)
11229 	 *	1    0	  0	Disable autonego and force linkup
11230 	 *	1    0	  1	got /C/ but not linkup yet
11231 	 *	1    1	  0	(linkup)
11232 	 *	1    1	  1	If IFM_AUTO, back to autonego
11233 	 *
11234 	 */
11235 	if (((ctrl & CTRL_SWDPIN(1)) == sig)
11236 	    && ((status & STATUS_LU) == 0)
11237 	    && ((rxcw & RXCW_C) == 0)) {
11238 		DPRINTF(WM_DEBUG_LINK, ("%s: force linkup and fullduplex\n",
11239 			__func__));
11240 		sc->sc_tbi_linkup = 0;
11241 		/* Disable auto-negotiation in the TXCW register */
11242 		CSR_WRITE(sc, WMREG_TXCW, (sc->sc_txcw & ~TXCW_ANE));
11243 
11244 		/*
11245 		 * Force link-up and also force full-duplex.
11246 		 *
11247 		 * NOTE: The hardware updated TFCE and RFCE in CTRL
11248 		 * automatically, so we should refresh sc->sc_ctrl
11249 		 */
11250 		sc->sc_ctrl = ctrl | CTRL_SLU | CTRL_FD;
11251 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11252 	} else if (((status & STATUS_LU) != 0)
11253 	    && ((rxcw & RXCW_C) != 0)
11254 	    && (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)) {
11255 		sc->sc_tbi_linkup = 1;
11256 		DPRINTF(WM_DEBUG_LINK, ("%s: go back to autonego\n",
11257 			__func__));
11258 		CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11259 		CSR_WRITE(sc, WMREG_CTRL, (ctrl & ~CTRL_SLU));
11260 	} else if (((ctrl & CTRL_SWDPIN(1)) == sig)
11261 	    && ((rxcw & RXCW_C) != 0)) {
11262 		DPRINTF(WM_DEBUG_LINK, ("/C/"));
11263 	} else {
11264 		DPRINTF(WM_DEBUG_LINK, ("%s: %x,%x,%x\n", __func__, rxcw, ctrl,
11265 			status));
11266 	}
11267 
11268 	return 0;
11269 }
11270 
11271 /*
11272  * wm_tbi_tick:
11273  *
11274  *	Check the link on TBI devices.
11275  *	This function acts as mii_tick().
11276  */
11277 static void
11278 wm_tbi_tick(struct wm_softc *sc)
11279 {
11280 	struct mii_data *mii = &sc->sc_mii;
11281 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
11282 	uint32_t status;
11283 
11284 	KASSERT(WM_CORE_LOCKED(sc));
11285 
11286 	status = CSR_READ(sc, WMREG_STATUS);
11287 
11288 	/* XXX is this needed? */
11289 	(void)CSR_READ(sc, WMREG_RXCW);
11290 	(void)CSR_READ(sc, WMREG_CTRL);
11291 
11292 	/* set link status */
11293 	if ((status & STATUS_LU) == 0) {
11294 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> down\n",
11295 			device_xname(sc->sc_dev)));
11296 		sc->sc_tbi_linkup = 0;
11297 	} else if (sc->sc_tbi_linkup == 0) {
11298 		DPRINTF(WM_DEBUG_LINK, ("%s: LINK: checklink -> up %s\n",
11299 			device_xname(sc->sc_dev),
11300 			(status & STATUS_FD) ? "FDX" : "HDX"));
11301 		sc->sc_tbi_linkup = 1;
11302 		sc->sc_tbi_serdes_ticks = 0;
11303 	}
11304 
11305 	if ((sc->sc_ethercom.ec_if.if_flags & IFF_UP) == 0)
11306 		goto setled;
11307 
11308 	if ((status & STATUS_LU) == 0) {
11309 		sc->sc_tbi_linkup = 0;
11310 		/* If the timer expired, retry autonegotiation */
11311 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11312 		    && (++sc->sc_tbi_serdes_ticks
11313 			>= sc->sc_tbi_serdes_anegticks)) {
11314 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
11315 			sc->sc_tbi_serdes_ticks = 0;
11316 			/*
11317 			 * Reset the link, and let autonegotiation do
11318 			 * its thing
11319 			 */
11320 			sc->sc_ctrl |= CTRL_LRST;
11321 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11322 			CSR_WRITE_FLUSH(sc);
11323 			delay(1000);
11324 			sc->sc_ctrl &= ~CTRL_LRST;
11325 			CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11326 			CSR_WRITE_FLUSH(sc);
11327 			delay(1000);
11328 			CSR_WRITE(sc, WMREG_TXCW,
11329 			    sc->sc_txcw & ~TXCW_ANE);
11330 			CSR_WRITE(sc, WMREG_TXCW, sc->sc_txcw);
11331 		}
11332 	}
11333 
11334 setled:
11335 	wm_tbi_serdes_set_linkled(sc);
11336 }
11337 
11338 /* SERDES related */
11339 static void
11340 wm_serdes_power_up_link_82575(struct wm_softc *sc)
11341 {
11342 	uint32_t reg;
11343 
11344 	if ((sc->sc_mediatype != WM_MEDIATYPE_SERDES)
11345 	    && ((sc->sc_flags & WM_F_SGMII) == 0))
11346 		return;
11347 
11348 	reg = CSR_READ(sc, WMREG_PCS_CFG);
11349 	reg |= PCS_CFG_PCS_EN;
11350 	CSR_WRITE(sc, WMREG_PCS_CFG, reg);
11351 
11352 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
11353 	reg &= ~CTRL_EXT_SWDPIN(3);
11354 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
11355 	CSR_WRITE_FLUSH(sc);
11356 }
11357 
11358 static int
11359 wm_serdes_mediachange(struct ifnet *ifp)
11360 {
11361 	struct wm_softc *sc = ifp->if_softc;
11362 	bool pcs_autoneg = true; /* XXX */
11363 	uint32_t ctrl_ext, pcs_lctl, reg;
11364 
11365 	/* XXX Currently, this function is not called on 8257[12] */
11366 	if ((sc->sc_type == WM_T_82571) || (sc->sc_type == WM_T_82572)
11367 	    || (sc->sc_type >= WM_T_82575))
11368 		CSR_WRITE(sc, WMREG_SCTL, SCTL_DISABLE_SERDES_LOOPBACK);
11369 
11370 	wm_serdes_power_up_link_82575(sc);
11371 
11372 	sc->sc_ctrl |= CTRL_SLU;
11373 
11374 	if ((sc->sc_type == WM_T_82575) || (sc->sc_type == WM_T_82576))
11375 		sc->sc_ctrl |= CTRL_SWDPIN(0) | CTRL_SWDPIN(1);
11376 
11377 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11378 	pcs_lctl = CSR_READ(sc, WMREG_PCS_LCTL);
11379 	switch (ctrl_ext & CTRL_EXT_LINK_MODE_MASK) {
11380 	case CTRL_EXT_LINK_MODE_SGMII:
11381 		pcs_autoneg = true;
11382 		pcs_lctl &= ~PCS_LCTL_AN_TIMEOUT;
11383 		break;
11384 	case CTRL_EXT_LINK_MODE_1000KX:
11385 		pcs_autoneg = false;
11386 		/* FALLTHROUGH */
11387 	default:
11388 		if ((sc->sc_type == WM_T_82575)
11389 		    || (sc->sc_type == WM_T_82576)) {
11390 			if ((sc->sc_flags & WM_F_PCS_DIS_AUTONEGO) != 0)
11391 				pcs_autoneg = false;
11392 		}
11393 		sc->sc_ctrl |= CTRL_SPEED_1000 | CTRL_FRCSPD | CTRL_FD
11394 		    | CTRL_FRCFDX;
11395 		pcs_lctl |= PCS_LCTL_FSV_1000 | PCS_LCTL_FDV_FULL;
11396 	}
11397 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl);
11398 
11399 	if (pcs_autoneg) {
11400 		pcs_lctl |= PCS_LCTL_AN_ENABLE | PCS_LCTL_AN_RESTART;
11401 		pcs_lctl &= ~PCS_LCTL_FORCE_FC;
11402 
11403 		reg = CSR_READ(sc, WMREG_PCS_ANADV);
11404 		reg &= ~(TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE);
11405 		reg |= TXCW_ASYM_PAUSE | TXCW_SYM_PAUSE;
11406 		CSR_WRITE(sc, WMREG_PCS_ANADV, reg);
11407 	} else
11408 		pcs_lctl |= PCS_LCTL_FSD | PCS_LCTL_FORCE_FC;
11409 
11410 	CSR_WRITE(sc, WMREG_PCS_LCTL, pcs_lctl);
11411 
11412 
11413 	return 0;
11414 }
11415 
11416 static void
11417 wm_serdes_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
11418 {
11419 	struct wm_softc *sc = ifp->if_softc;
11420 	struct mii_data *mii = &sc->sc_mii;
11421 	struct ifmedia_entry *ife = sc->sc_mii.mii_media.ifm_cur;
11422 	uint32_t pcs_adv, pcs_lpab, reg;
11423 
11424 	ifmr->ifm_status = IFM_AVALID;
11425 	ifmr->ifm_active = IFM_ETHER;
11426 
11427 	/* Check PCS */
11428 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
11429 	if ((reg & PCS_LSTS_LINKOK) == 0) {
11430 		ifmr->ifm_active |= IFM_NONE;
11431 		sc->sc_tbi_linkup = 0;
11432 		goto setled;
11433 	}
11434 
11435 	sc->sc_tbi_linkup = 1;
11436 	ifmr->ifm_status |= IFM_ACTIVE;
11437 	if (sc->sc_type == WM_T_I354) {
11438 		uint32_t status;
11439 
11440 		status = CSR_READ(sc, WMREG_STATUS);
11441 		if (((status & STATUS_2P5_SKU) != 0)
11442 		    && ((status & STATUS_2P5_SKU_OVER) == 0)) {
11443 			ifmr->ifm_active |= IFM_2500_SX; /* XXX KX */
11444 		} else
11445 			ifmr->ifm_active |= IFM_1000_SX; /* XXX KX */
11446 	} else {
11447 		switch (__SHIFTOUT(reg, PCS_LSTS_SPEED)) {
11448 		case PCS_LSTS_SPEED_10:
11449 			ifmr->ifm_active |= IFM_10_T; /* XXX */
11450 			break;
11451 		case PCS_LSTS_SPEED_100:
11452 			ifmr->ifm_active |= IFM_100_FX; /* XXX */
11453 			break;
11454 		case PCS_LSTS_SPEED_1000:
11455 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
11456 			break;
11457 		default:
11458 			device_printf(sc->sc_dev, "Unknown speed\n");
11459 			ifmr->ifm_active |= IFM_1000_SX; /* XXX */
11460 			break;
11461 		}
11462 	}
11463 	if ((reg & PCS_LSTS_FDX) != 0)
11464 		ifmr->ifm_active |= IFM_FDX;
11465 	else
11466 		ifmr->ifm_active |= IFM_HDX;
11467 	mii->mii_media_active &= ~IFM_ETH_FMASK;
11468 	if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
11469 		/* Check flow */
11470 		reg = CSR_READ(sc, WMREG_PCS_LSTS);
11471 		if ((reg & PCS_LSTS_AN_COMP) == 0) {
11472 			DPRINTF(WM_DEBUG_LINK, ("XXX LINKOK but not ACOMP\n"));
11473 			goto setled;
11474 		}
11475 		pcs_adv = CSR_READ(sc, WMREG_PCS_ANADV);
11476 		pcs_lpab = CSR_READ(sc, WMREG_PCS_LPAB);
11477 		DPRINTF(WM_DEBUG_LINK,
11478 		    ("XXX AN result(2) %08x, %08x\n", pcs_adv, pcs_lpab));
11479 		if ((pcs_adv & TXCW_SYM_PAUSE)
11480 		    && (pcs_lpab & TXCW_SYM_PAUSE)) {
11481 			mii->mii_media_active |= IFM_FLOW
11482 			    | IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
11483 		} else if (((pcs_adv & TXCW_SYM_PAUSE) == 0)
11484 		    && (pcs_adv & TXCW_ASYM_PAUSE)
11485 		    && (pcs_lpab & TXCW_SYM_PAUSE)
11486 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
11487 			mii->mii_media_active |= IFM_FLOW
11488 			    | IFM_ETH_TXPAUSE;
11489 		} else if ((pcs_adv & TXCW_SYM_PAUSE)
11490 		    && (pcs_adv & TXCW_ASYM_PAUSE)
11491 		    && ((pcs_lpab & TXCW_SYM_PAUSE) == 0)
11492 		    && (pcs_lpab & TXCW_ASYM_PAUSE)) {
11493 			mii->mii_media_active |= IFM_FLOW
11494 			    | IFM_ETH_RXPAUSE;
11495 		}
11496 	}
11497 	ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK)
11498 	    | (mii->mii_media_active & IFM_ETH_FMASK);
11499 setled:
11500 	wm_tbi_serdes_set_linkled(sc);
11501 }
11502 
11503 /*
11504  * wm_serdes_tick:
11505  *
11506  *	Check the link on serdes devices.
11507  */
11508 static void
11509 wm_serdes_tick(struct wm_softc *sc)
11510 {
11511 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
11512 	struct mii_data *mii = &sc->sc_mii;
11513 	struct ifmedia_entry *ife = mii->mii_media.ifm_cur;
11514 	uint32_t reg;
11515 
11516 	KASSERT(WM_CORE_LOCKED(sc));
11517 
11518 	mii->mii_media_status = IFM_AVALID;
11519 	mii->mii_media_active = IFM_ETHER;
11520 
11521 	/* Check PCS */
11522 	reg = CSR_READ(sc, WMREG_PCS_LSTS);
11523 	if ((reg & PCS_LSTS_LINKOK) != 0) {
11524 		mii->mii_media_status |= IFM_ACTIVE;
11525 		sc->sc_tbi_linkup = 1;
11526 		sc->sc_tbi_serdes_ticks = 0;
11527 		mii->mii_media_active |= IFM_1000_SX; /* XXX */
11528 		if ((reg & PCS_LSTS_FDX) != 0)
11529 			mii->mii_media_active |= IFM_FDX;
11530 		else
11531 			mii->mii_media_active |= IFM_HDX;
11532 	} else {
11533 		mii->mii_media_active |= IFM_NONE;
11534 		sc->sc_tbi_linkup = 0;
11535 		/* If the timer expired, retry autonegotiation */
11536 		if ((IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO)
11537 		    && (++sc->sc_tbi_serdes_ticks
11538 			>= sc->sc_tbi_serdes_anegticks)) {
11539 			DPRINTF(WM_DEBUG_LINK, ("EXPIRE\n"));
11540 			sc->sc_tbi_serdes_ticks = 0;
11541 			/* XXX */
11542 			wm_serdes_mediachange(ifp);
11543 		}
11544 	}
11545 
11546 	wm_tbi_serdes_set_linkled(sc);
11547 }
11548 
11549 /* SFP related */
11550 
11551 static int
11552 wm_sfp_read_data_byte(struct wm_softc *sc, uint16_t offset, uint8_t *data)
11553 {
11554 	uint32_t i2ccmd;
11555 	int i;
11556 
11557 	i2ccmd = (offset << I2CCMD_REG_ADDR_SHIFT) | I2CCMD_OPCODE_READ;
11558 	CSR_WRITE(sc, WMREG_I2CCMD, i2ccmd);
11559 
11560 	/* Poll the ready bit */
11561 	for (i = 0; i < I2CCMD_PHY_TIMEOUT; i++) {
11562 		delay(50);
11563 		i2ccmd = CSR_READ(sc, WMREG_I2CCMD);
11564 		if (i2ccmd & I2CCMD_READY)
11565 			break;
11566 	}
11567 	if ((i2ccmd & I2CCMD_READY) == 0)
11568 		return -1;
11569 	if ((i2ccmd & I2CCMD_ERROR) != 0)
11570 		return -1;
11571 
11572 	*data = i2ccmd & 0x00ff;
11573 
11574 	return 0;
11575 }
11576 
11577 static uint32_t
11578 wm_sfp_get_media_type(struct wm_softc *sc)
11579 {
11580 	uint32_t ctrl_ext;
11581 	uint8_t val = 0;
11582 	int timeout = 3;
11583 	uint32_t mediatype = WM_MEDIATYPE_UNKNOWN;
11584 	int rv = -1;
11585 
11586 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
11587 	ctrl_ext &= ~CTRL_EXT_SWDPIN(3);
11588 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_I2C_ENA);
11589 	CSR_WRITE_FLUSH(sc);
11590 
11591 	/* Read SFP module data */
11592 	while (timeout) {
11593 		rv = wm_sfp_read_data_byte(sc, SFF_SFP_ID_OFF, &val);
11594 		if (rv == 0)
11595 			break;
11596 		delay(100*1000); /* XXX too big */
11597 		timeout--;
11598 	}
11599 	if (rv != 0)
11600 		goto out;
11601 	switch (val) {
11602 	case SFF_SFP_ID_SFF:
11603 		aprint_normal_dev(sc->sc_dev,
11604 		    "Module/Connector soldered to board\n");
11605 		break;
11606 	case SFF_SFP_ID_SFP:
11607 		aprint_normal_dev(sc->sc_dev, "SFP\n");
11608 		break;
11609 	case SFF_SFP_ID_UNKNOWN:
11610 		goto out;
11611 	default:
11612 		break;
11613 	}
11614 
11615 	rv = wm_sfp_read_data_byte(sc, SFF_SFP_ETH_FLAGS_OFF, &val);
11616 	if (rv != 0) {
11617 		goto out;
11618 	}
11619 
11620 	if ((val & (SFF_SFP_ETH_FLAGS_1000SX | SFF_SFP_ETH_FLAGS_1000LX)) != 0)
11621 		mediatype = WM_MEDIATYPE_SERDES;
11622 	else if ((val & SFF_SFP_ETH_FLAGS_1000T) != 0) {
11623 		sc->sc_flags |= WM_F_SGMII;
11624 		mediatype = WM_MEDIATYPE_COPPER;
11625 	} else if ((val & SFF_SFP_ETH_FLAGS_100FX) != 0) {
11626 		sc->sc_flags |= WM_F_SGMII;
11627 		mediatype = WM_MEDIATYPE_SERDES;
11628 	}
11629 
11630 out:
11631 	/* Restore I2C interface setting */
11632 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
11633 
11634 	return mediatype;
11635 }
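
/*
 * Summary sketch of the SFP mapping above: 1000BASE-SX/LX modules
 * select WM_MEDIATYPE_SERDES, 1000BASE-T modules select copper with
 * WM_F_SGMII set, 100BASE-FX modules are handled as serdes with
 * WM_F_SGMII set, and anything else leaves the media type at
 * WM_MEDIATYPE_UNKNOWN.
 */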
11636 
11637 /*
11638  * NVM related.
11639  * Microwire, SPI (w/wo EERD) and Flash.
11640  */
11641 
11642 /* Both spi and uwire */
11643 
11644 /*
11645  * wm_eeprom_sendbits:
11646  *
11647  *	Send a series of bits to the EEPROM.
11648  */
11649 static void
11650 wm_eeprom_sendbits(struct wm_softc *sc, uint32_t bits, int nbits)
11651 {
11652 	uint32_t reg;
11653 	int x;
11654 
11655 	reg = CSR_READ(sc, WMREG_EECD);
11656 
11657 	for (x = nbits; x > 0; x--) {
11658 		if (bits & (1U << (x - 1)))
11659 			reg |= EECD_DI;
11660 		else
11661 			reg &= ~EECD_DI;
11662 		CSR_WRITE(sc, WMREG_EECD, reg);
11663 		CSR_WRITE_FLUSH(sc);
11664 		delay(2);
11665 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11666 		CSR_WRITE_FLUSH(sc);
11667 		delay(2);
11668 		CSR_WRITE(sc, WMREG_EECD, reg);
11669 		CSR_WRITE_FLUSH(sc);
11670 		delay(2);
11671 	}
11672 }
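
/*
 * Bit-banging sketch: each bit is presented on EECD_DI most
 * significant bit first and latched with a low-high-low pulse on
 * EECD_SK.  Sending the 3-bit Microwire READ opcode, for instance,
 * walks 'bits' from bit 2 down to bit 0, with one SK pulse and a few
 * microseconds of settling delay per bit.
 */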
11673 
11674 /*
11675  * wm_eeprom_recvbits:
11676  *
11677  *	Receive a series of bits from the EEPROM.
11678  */
11679 static void
11680 wm_eeprom_recvbits(struct wm_softc *sc, uint32_t *valp, int nbits)
11681 {
11682 	uint32_t reg, val;
11683 	int x;
11684 
11685 	reg = CSR_READ(sc, WMREG_EECD) & ~EECD_DI;
11686 
11687 	val = 0;
11688 	for (x = nbits; x > 0; x--) {
11689 		CSR_WRITE(sc, WMREG_EECD, reg | EECD_SK);
11690 		CSR_WRITE_FLUSH(sc);
11691 		delay(2);
11692 		if (CSR_READ(sc, WMREG_EECD) & EECD_DO)
11693 			val |= (1U << (x - 1));
11694 		CSR_WRITE(sc, WMREG_EECD, reg);
11695 		CSR_WRITE_FLUSH(sc);
11696 		delay(2);
11697 	}
11698 	*valp = val;
11699 }
11700 
11701 /* Microwire */
11702 
11703 /*
11704  * wm_nvm_read_uwire:
11705  *
11706  *	Read a word from the EEPROM using the MicroWire protocol.
11707  */
11708 static int
11709 wm_nvm_read_uwire(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11710 {
11711 	uint32_t reg, val;
11712 	int i;
11713 
11714 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11715 		device_xname(sc->sc_dev), __func__));
11716 
11717 	if (sc->nvm.acquire(sc) != 0)
11718 		return -1;
11719 
11720 	for (i = 0; i < wordcnt; i++) {
11721 		/* Clear SK and DI. */
11722 		reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_DI);
11723 		CSR_WRITE(sc, WMREG_EECD, reg);
11724 
11725 		/*
11726 		 * XXX: workaround for a bug in qemu-0.12.x and prior
11727 		 * and Xen.
11728 		 *
11729 		 * We use this workaround only for 82540 because qemu's
11730 		 * e1000 acts as an 82540.
11731 		 */
11732 		if (sc->sc_type == WM_T_82540) {
11733 			reg |= EECD_SK;
11734 			CSR_WRITE(sc, WMREG_EECD, reg);
11735 			reg &= ~EECD_SK;
11736 			CSR_WRITE(sc, WMREG_EECD, reg);
11737 			CSR_WRITE_FLUSH(sc);
11738 			delay(2);
11739 		}
11740 		/* XXX: end of workaround */
11741 
11742 		/* Set CHIP SELECT. */
11743 		reg |= EECD_CS;
11744 		CSR_WRITE(sc, WMREG_EECD, reg);
11745 		CSR_WRITE_FLUSH(sc);
11746 		delay(2);
11747 
11748 		/* Shift in the READ command. */
11749 		wm_eeprom_sendbits(sc, UWIRE_OPC_READ, 3);
11750 
11751 		/* Shift in address. */
11752 		wm_eeprom_sendbits(sc, word + i, sc->sc_nvm_addrbits);
11753 
11754 		/* Shift out the data. */
11755 		wm_eeprom_recvbits(sc, &val, 16);
11756 		data[i] = val & 0xffff;
11757 
11758 		/* Clear CHIP SELECT. */
11759 		reg = CSR_READ(sc, WMREG_EECD) & ~EECD_CS;
11760 		CSR_WRITE(sc, WMREG_EECD, reg);
11761 		CSR_WRITE_FLUSH(sc);
11762 		delay(2);
11763 	}
11764 
11765 	sc->nvm.release(sc);
11766 	return 0;
11767 }
11768 
11769 /* SPI */
11770 
11771 /*
11772  * Set SPI and FLASH related information from the EECD register.
11773  * For 82541 and 82547, the word size is taken from EEPROM.
11774  */
11775 static int
11776 wm_nvm_set_addrbits_size_eecd(struct wm_softc *sc)
11777 {
11778 	int size;
11779 	uint32_t reg;
11780 	uint16_t data;
11781 
11782 	reg = CSR_READ(sc, WMREG_EECD);
11783 	sc->sc_nvm_addrbits = (reg & EECD_EE_ABITS) ? 16 : 8;
11784 
11785 	/* Read the size of NVM from EECD by default */
11786 	size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11787 	switch (sc->sc_type) {
11788 	case WM_T_82541:
11789 	case WM_T_82541_2:
11790 	case WM_T_82547:
11791 	case WM_T_82547_2:
11792 		/* Set dummy value to access EEPROM */
11793 		sc->sc_nvm_wordsize = 64;
11794 		if (wm_nvm_read(sc, NVM_OFF_EEPROM_SIZE, 1, &data) != 0) {
11795 			aprint_error_dev(sc->sc_dev,
11796 			    "%s: failed to read EEPROM size\n", __func__);
11797 		}
11798 		reg = data;
11799 		size = __SHIFTOUT(reg, EECD_EE_SIZE_EX_MASK);
11800 		if (size == 0)
11801 			size = 6; /* 64 word size */
11802 		else
11803 			size += NVM_WORD_SIZE_BASE_SHIFT + 1;
11804 		break;
11805 	case WM_T_80003:
11806 	case WM_T_82571:
11807 	case WM_T_82572:
11808 	case WM_T_82573: /* SPI case */
11809 	case WM_T_82574: /* SPI case */
11810 	case WM_T_82583: /* SPI case */
11811 		size += NVM_WORD_SIZE_BASE_SHIFT;
11812 		if (size > 14)
11813 			size = 14;
11814 		break;
11815 	case WM_T_82575:
11816 	case WM_T_82576:
11817 	case WM_T_82580:
11818 	case WM_T_I350:
11819 	case WM_T_I354:
11820 	case WM_T_I210:
11821 	case WM_T_I211:
11822 		size += NVM_WORD_SIZE_BASE_SHIFT;
11823 		if (size > 15)
11824 			size = 15;
11825 		break;
11826 	default:
11827 		aprint_error_dev(sc->sc_dev,
11828 		    "%s: unknown device(%d)?\n", __func__, sc->sc_type);
11829 		return -1;
11830 		break;
11831 	}
11832 
11833 	sc->sc_nvm_wordsize = 1 << size;
11834 
11835 	return 0;
11836 }
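
/*
 * Worked example of the sizing above: the EECD size field is an
 * exponent, so on an 82575-class device a field value of 2 becomes
 * size = 2 + NVM_WORD_SIZE_BASE_SHIFT (8 if the base shift is the
 * usual 6), giving sc_nvm_wordsize = 1 << 8 = 256 words; the
 * per-family clamps (14 or 15) bound the largest supported exponent.
 */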
11837 
11838 /*
11839  * wm_nvm_ready_spi:
11840  *
11841  *	Wait for a SPI EEPROM to be ready for commands.
11842  */
11843 static int
11844 wm_nvm_ready_spi(struct wm_softc *sc)
11845 {
11846 	uint32_t val;
11847 	int usec;
11848 
11849 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11850 		device_xname(sc->sc_dev), __func__));
11851 
11852 	for (usec = 0; usec < SPI_MAX_RETRIES; delay(5), usec += 5) {
11853 		wm_eeprom_sendbits(sc, SPI_OPC_RDSR, 8);
11854 		wm_eeprom_recvbits(sc, &val, 8);
11855 		if ((val & SPI_SR_RDY) == 0)
11856 			break;
11857 	}
11858 	if (usec >= SPI_MAX_RETRIES) {
11859 		aprint_error_dev(sc->sc_dev, "EEPROM failed to become ready\n");
11860 		return -1;
11861 	}
11862 	return 0;
11863 }
11864 
11865 /*
11866  * wm_nvm_read_spi:
11867  *
11868  *	Read a word from the EEPROM using the SPI protocol.
11869  */
11870 static int
11871 wm_nvm_read_spi(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
11872 {
11873 	uint32_t reg, val;
11874 	int i;
11875 	uint8_t opc;
11876 	int rv = 0;
11877 
11878 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11879 		device_xname(sc->sc_dev), __func__));
11880 
11881 	if (sc->nvm.acquire(sc) != 0)
11882 		return -1;
11883 
11884 	/* Clear SK and CS. */
11885 	reg = CSR_READ(sc, WMREG_EECD) & ~(EECD_SK | EECD_CS);
11886 	CSR_WRITE(sc, WMREG_EECD, reg);
11887 	CSR_WRITE_FLUSH(sc);
11888 	delay(2);
11889 
11890 	if ((rv = wm_nvm_ready_spi(sc)) != 0)
11891 		goto out;
11892 
11893 	/* Toggle CS to flush commands. */
11894 	CSR_WRITE(sc, WMREG_EECD, reg | EECD_CS);
11895 	CSR_WRITE_FLUSH(sc);
11896 	delay(2);
11897 	CSR_WRITE(sc, WMREG_EECD, reg);
11898 	CSR_WRITE_FLUSH(sc);
11899 	delay(2);
11900 
11901 	opc = SPI_OPC_READ;
11902 	if (sc->sc_nvm_addrbits == 8 && word >= 128)
11903 		opc |= SPI_OPC_A8;
11904 
11905 	wm_eeprom_sendbits(sc, opc, 8);
11906 	wm_eeprom_sendbits(sc, word << 1, sc->sc_nvm_addrbits);
11907 
11908 	for (i = 0; i < wordcnt; i++) {
11909 		wm_eeprom_recvbits(sc, &val, 16);
11910 		data[i] = ((val >> 8) & 0xff) | ((val & 0xff) << 8);
11911 	}
11912 
11913 	/* Raise CS and clear SK. */
11914 	reg = (CSR_READ(sc, WMREG_EECD) & ~EECD_SK) | EECD_CS;
11915 	CSR_WRITE(sc, WMREG_EECD, reg);
11916 	CSR_WRITE_FLUSH(sc);
11917 	delay(2);
11918 
11919 out:
11920 	sc->nvm.release(sc);
11921 	return rv;
11922 }
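
/*
 * Address-width sketch for the SPI read above: 'word << 1' converts a
 * word index into the byte address the EEPROM expects, so a part with
 * 8 address bits can reach bytes 0..255 (words 0..127) directly; for
 * word >= 128 the ninth address bit travels in the opcode via
 * SPI_OPC_A8, e.g. word 130 -> byte address 260, with bit 8 carried
 * by A8.
 */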
11923 
11924 /* Using with EERD */
11925 
11926 static int
11927 wm_poll_eerd_eewr_done(struct wm_softc *sc, int rw)
11928 {
11929 	uint32_t attempts = 100000;
11930 	uint32_t i, reg = 0;
11931 	int32_t done = -1;
11932 
11933 	for (i = 0; i < attempts; i++) {
11934 		reg = CSR_READ(sc, rw);
11935 
11936 		if (reg & EERD_DONE) {
11937 			done = 0;
11938 			break;
11939 		}
11940 		delay(5);
11941 	}
11942 
11943 	return done;
11944 }
11945 
11946 static int
11947 wm_nvm_read_eerd(struct wm_softc *sc, int offset, int wordcnt, uint16_t *data)
11948 {
11949 	int i, eerd = 0;
11950 	int rv = 0;
11951 
11952 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
11953 		device_xname(sc->sc_dev), __func__));
11954 
11955 	if (sc->nvm.acquire(sc) != 0)
11956 		return -1;
11957 
11958 	for (i = 0; i < wordcnt; i++) {
11959 		eerd = ((offset + i) << EERD_ADDR_SHIFT) | EERD_START;
11960 		CSR_WRITE(sc, WMREG_EERD, eerd);
11961 		rv = wm_poll_eerd_eewr_done(sc, WMREG_EERD);
11962 		if (rv != 0) {
11963 			aprint_error_dev(sc->sc_dev, "EERD polling failed: "
11964 			    "offset=%d. wordcnt=%d\n", offset, wordcnt);
11965 			break;
11966 		}
11967 		data[i] = (CSR_READ(sc, WMREG_EERD) >> EERD_DATA_SHIFT);
11968 	}
11969 
11970 	sc->nvm.release(sc);
11971 	return rv;
11972 }
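
/*
 * EERD usage sketch: each iteration above programs one word address,
 * e.g. for offset 3 it writes (3 << EERD_ADDR_SHIFT) | EERD_START,
 * polls for EERD_DONE via wm_poll_eerd_eewr_done(), and then extracts
 * the 16-bit result from the field at EERD_DATA_SHIFT.
 */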
11973 
11974 /* Flash */
11975 
11976 static int
11977 wm_nvm_valid_bank_detect_ich8lan(struct wm_softc *sc, unsigned int *bank)
11978 {
11979 	uint32_t eecd;
11980 	uint32_t act_offset = ICH_NVM_SIG_WORD * 2 + 1;
11981 	uint32_t bank1_offset = sc->sc_ich8_flash_bank_size * sizeof(uint16_t);
11982 	uint32_t nvm_dword = 0;
11983 	uint8_t sig_byte = 0;
11984 	int rv;
11985 
11986 	switch (sc->sc_type) {
11987 	case WM_T_PCH_SPT:
11988 	case WM_T_PCH_CNP:
11989 		bank1_offset = sc->sc_ich8_flash_bank_size * 2;
11990 		act_offset = ICH_NVM_SIG_WORD * 2;
11991 
11992 		/* set bank to 0 in case flash read fails. */
11993 		*bank = 0;
11994 
11995 		/* Check bank 0 */
11996 		rv = wm_read_ich8_dword(sc, act_offset, &nvm_dword);
11997 		if (rv != 0)
11998 			return rv;
11999 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
12000 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12001 			*bank = 0;
12002 			return 0;
12003 		}
12004 
12005 		/* Check bank 1 */
12006 		rv = wm_read_ich8_dword(sc, act_offset + bank1_offset,
12007 		    &nvm_dword);
		if (rv != 0)
			return rv;
12008 		sig_byte = (uint8_t)((nvm_dword & 0xFF00) >> 8);
12009 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12010 			*bank = 1;
12011 			return 0;
12012 		}
12013 		aprint_error_dev(sc->sc_dev,
12014 		    "%s: no valid NVM bank present (%u)\n", __func__, *bank);
12015 		return -1;
12016 	case WM_T_ICH8:
12017 	case WM_T_ICH9:
12018 		eecd = CSR_READ(sc, WMREG_EECD);
12019 		if ((eecd & EECD_SEC1VAL_VALMASK) == EECD_SEC1VAL_VALMASK) {
12020 			*bank = ((eecd & EECD_SEC1VAL) != 0) ? 1 : 0;
12021 			return 0;
12022 		}
12023 		/* FALLTHROUGH */
12024 	default:
12025 		/* Default to 0 */
12026 		*bank = 0;
12027 
12028 		/* Check bank 0 */
12029 		wm_read_ich8_byte(sc, act_offset, &sig_byte);
12030 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12031 			*bank = 0;
12032 			return 0;
12033 		}
12034 
12035 		/* Check bank 1 */
12036 		wm_read_ich8_byte(sc, act_offset + bank1_offset,
12037 		    &sig_byte);
12038 		if ((sig_byte & ICH_NVM_VALID_SIG_MASK) == ICH_NVM_SIG_VALUE) {
12039 			*bank = 1;
12040 			return 0;
12041 		}
12042 	}
12043 
12044 	DPRINTF(WM_DEBUG_NVM, ("%s: No valid NVM bank present\n",
12045 		device_xname(sc->sc_dev)));
12046 	return -1;
12047 }
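
/*
 * Signature-check sketch: a bank is valid when the masked signature
 * byte matches, i.e. (sig_byte & ICH_NVM_VALID_SIG_MASK) ==
 * ICH_NVM_SIG_VALUE.  Assuming the usual e1000 values (mask 0xc0,
 * value 0x80), a byte of 0x8a validates while 0x4a does not.
 */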
12048 
12049 /******************************************************************************
12050  * This function does initial flash setup so that a new read/write/erase cycle
12051  * can be started.
12052  *
12053  * sc - The pointer to the hw structure
12054  ****************************************************************************/
12055 static int32_t
12056 wm_ich8_cycle_init(struct wm_softc *sc)
12057 {
12058 	uint16_t hsfsts;
12059 	int32_t error = 1;
12060 	int32_t i     = 0;
12061 
12062 	if (sc->sc_type >= WM_T_PCH_SPT)
12063 		hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) & 0xffffUL;
12064 	else
12065 		hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12066 
12067 	/* Check that the Flash Descriptor Valid bit is set in HW status */
12068 	if ((hsfsts & HSFSTS_FLDVAL) == 0) {
12069 		return error;
12070 	}
12071 
12072 	/* Clear FCERR in Hw status by writing 1 */
12073 	/* Clear DAEL in Hw status by writing a 1 */
12074 	hsfsts |= HSFSTS_ERR | HSFSTS_DAEL;
12075 
12076 	if (sc->sc_type >= WM_T_PCH_SPT)
12077 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS, hsfsts & 0xffffUL);
12078 	else
12079 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
12080 
12081 	/*
12082 	 * Either we should have a hardware SPI cycle-in-progress bit to
12083 	 * check before starting a new cycle, or the FDONE bit should be
12084 	 * changed in the hardware so that it is 1 after a hardware reset,
12085 	 * which could then be used to tell whether a cycle is in progress
12086 	 * or has completed.  We should also have some software semaphore
12087 	 * mechanism guarding FDONE or the cycle-in-progress bit, so that
12088 	 * accesses to those bits by two threads are serialized and two
12089 	 * threads don't start a cycle at the same time.
12090 	 */
12091 
12092 	if ((hsfsts & HSFSTS_FLINPRO) == 0) {
12093 		/*
12094 		 * There is no cycle running at present, so we can start a
12095 		 * cycle
12096 		 */
12097 
12098 		/* Begin by setting Flash Cycle Done. */
12099 		hsfsts |= HSFSTS_DONE;
12100 		if (sc->sc_type >= WM_T_PCH_SPT)
12101 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12102 			    hsfsts & 0xffffUL);
12103 		else
12104 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS, hsfsts);
12105 		error = 0;
12106 	} else {
12107 		/*
12108 		 * Otherwise poll for some time so the current cycle has a
12109 		 * chance to end before giving up.
12110 		 */
12111 		for (i = 0; i < ICH_FLASH_COMMAND_TIMEOUT; i++) {
12112 			if (sc->sc_type >= WM_T_PCH_SPT)
12113 				hsfsts = ICH8_FLASH_READ32(sc,
12114 				    ICH_FLASH_HSFSTS) & 0xffffUL;
12115 			else
12116 				hsfsts = ICH8_FLASH_READ16(sc,
12117 				    ICH_FLASH_HSFSTS);
12118 			if ((hsfsts & HSFSTS_FLINPRO) == 0) {
12119 				error = 0;
12120 				break;
12121 			}
12122 			delay(1);
12123 		}
12124 		if (error == 0) {
12125 			/*
12126 			 * The previous cycle ended within the timeout;
12127 			 * now set the Flash Cycle Done.
12128 			 */
12129 			hsfsts |= HSFSTS_DONE;
12130 			if (sc->sc_type >= WM_T_PCH_SPT)
12131 				ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12132 				    hsfsts & 0xffffUL);
12133 			else
12134 				ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFSTS,
12135 				    hsfsts);
12136 		}
12137 	}
12138 	return error;
12139 }
12140 
12141 /******************************************************************************
12142  * This function starts a flash cycle and waits for its completion
12143  *
12144  * sc - The pointer to the hw structure
12145  ****************************************************************************/
12146 static int32_t
12147 wm_ich8_flash_cycle(struct wm_softc *sc, uint32_t timeout)
12148 {
12149 	uint16_t hsflctl;
12150 	uint16_t hsfsts;
12151 	int32_t error = 1;
12152 	uint32_t i = 0;
12153 
12154 	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
12155 	if (sc->sc_type >= WM_T_PCH_SPT)
12156 		hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS) >> 16;
12157 	else
12158 		hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12159 	hsflctl |= HSFCTL_GO;
12160 	if (sc->sc_type >= WM_T_PCH_SPT)
12161 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12162 		    (uint32_t)hsflctl << 16);
12163 	else
12164 		ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
12165 
12166 	/* Wait till FDONE bit is set to 1 */
12167 	do {
12168 		if (sc->sc_type >= WM_T_PCH_SPT)
12169 			hsfsts = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
12170 			    & 0xffffUL;
12171 		else
12172 			hsfsts = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFSTS);
12173 		if (hsfsts & HSFSTS_DONE)
12174 			break;
12175 		delay(1);
12176 		i++;
12177 	} while (i < timeout);
12178 	if (((hsfsts & HSFSTS_DONE) != 0) && ((hsfsts & HSFSTS_ERR) == 0))
12179 		error = 0;
12180 
12181 	return error;
12182 }
12183 
12184 /******************************************************************************
12185  * Reads a byte or (d)word from the NVM using the ICH8 flash access registers.
12186  *
12187  * sc - The pointer to the hw structure
12188  * index - The index of the byte or word to read.
12189  * size - Size of data to read, 1=byte 2=word, 4=dword
12190  * data - Pointer to the word to store the value read.
12191  *****************************************************************************/
12192 static int32_t
12193 wm_read_ich8_data(struct wm_softc *sc, uint32_t index,
12194     uint32_t size, uint32_t *data)
12195 {
12196 	uint16_t hsfsts;
12197 	uint16_t hsflctl;
12198 	uint32_t flash_linear_address;
12199 	uint32_t flash_data = 0;
12200 	int32_t error = 1;
12201 	int32_t count = 0;
12202 
12203 	if (size < 1 || size > 4 || data == NULL ||
12204 	    index > ICH_FLASH_LINEAR_ADDR_MASK)
12205 		return error;
12206 
12207 	flash_linear_address = (ICH_FLASH_LINEAR_ADDR_MASK & index) +
12208 	    sc->sc_ich8_flash_base;
12209 
12210 	do {
12211 		delay(1);
12212 		/* Steps */
12213 		error = wm_ich8_cycle_init(sc);
12214 		if (error)
12215 			break;
12216 
12217 		if (sc->sc_type >= WM_T_PCH_SPT)
12218 			hsflctl = ICH8_FLASH_READ32(sc, ICH_FLASH_HSFSTS)
12219 			    >> 16;
12220 		else
12221 			hsflctl = ICH8_FLASH_READ16(sc, ICH_FLASH_HSFCTL);
12222 		/* The byte count field encodes size - 1 (0/1/3 = 1/2/4 bytes) */
12223 		hsflctl |= ((size - 1) << HSFCTL_BCOUNT_SHIFT)
12224 		    & HSFCTL_BCOUNT_MASK;
12225 		hsflctl |= ICH_CYCLE_READ << HSFCTL_CYCLE_SHIFT;
12226 		if (sc->sc_type >= WM_T_PCH_SPT) {
12227 			/*
12228 			 * In SPT, this register is in LAN memory space, not
12229 			 * flash.  Therefore, only 32-bit access is supported.
12230 			 */
12231 			ICH8_FLASH_WRITE32(sc, ICH_FLASH_HSFSTS,
12232 			    (uint32_t)hsflctl << 16);
12233 		} else
12234 			ICH8_FLASH_WRITE16(sc, ICH_FLASH_HSFCTL, hsflctl);
12235 
12236 		/*
12237 		 * Write the last 24 bits of index into Flash Linear address
12238 		 * field in Flash Address
12239 		 */
12240 		/* TODO: maybe check the index against the size of the flash */
12241 
12242 		ICH8_FLASH_WRITE32(sc, ICH_FLASH_FADDR, flash_linear_address);
12243 
12244 		error = wm_ich8_flash_cycle(sc, ICH_FLASH_COMMAND_TIMEOUT);
12245 
12246 		/*
12247 		 * If FCERR is set to 1, clear it and retry the whole
12248 		 * sequence a few more times; otherwise read the data out
12249 		 * of (shift in) the Flash Data0 register, least
12250 		 * significant byte first.
12251 		 */
12252 		if (error == 0) {
12253 			flash_data = ICH8_FLASH_READ32(sc, ICH_FLASH_FDATA0);
12254 			if (size == 1)
12255 				*data = (uint8_t)(flash_data & 0x000000FF);
12256 			else if (size == 2)
12257 				*data = (uint16_t)(flash_data & 0x0000FFFF);
12258 			else if (size == 4)
12259 				*data = (uint32_t)flash_data;
12260 			break;
12261 		} else {
12262 			/*
12263 			 * If we've gotten here, then things are probably
12264 			 * completely hosed, but if the error condition is
12265 			 * detected, it won't hurt to give it another try...
12266 			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
12267 			 */
12268 			if (sc->sc_type >= WM_T_PCH_SPT)
12269 				hsfsts = ICH8_FLASH_READ32(sc,
12270 				    ICH_FLASH_HSFSTS) & 0xffffUL;
12271 			else
12272 				hsfsts = ICH8_FLASH_READ16(sc,
12273 				    ICH_FLASH_HSFSTS);
12274 
12275 			if (hsfsts & HSFSTS_ERR) {
12276 				/* Repeat for some time before giving up. */
12277 				continue;
12278 			} else if ((hsfsts & HSFSTS_DONE) == 0)
12279 				break;
12280 		}
12281 	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
12282 
12283 	return error;
12284 }
12285 
12286 /******************************************************************************
12287  * Reads a single byte from the NVM using the ICH8 flash access registers.
12288  *
12289  * sc - pointer to wm_hw structure
12290  * index - The index of the byte to read.
12291  * data - Pointer to a byte to store the value read.
12292  *****************************************************************************/
12293 static int32_t
12294 wm_read_ich8_byte(struct wm_softc *sc, uint32_t index, uint8_t* data)
12295 {
12296 	int32_t status;
12297 	uint32_t word = 0;
12298 
12299 	status = wm_read_ich8_data(sc, index, 1, &word);
12300 	if (status == 0)
12301 		*data = (uint8_t)word;
12302 	else
12303 		*data = 0;
12304 
12305 	return status;
12306 }
12307 
12308 /******************************************************************************
12309  * Reads a word from the NVM using the ICH8 flash access registers.
12310  *
12311  * sc - pointer to wm_hw structure
12312  * index - The starting byte index of the word to read.
12313  * data - Pointer to a word to store the value read.
12314  *****************************************************************************/
12315 static int32_t
12316 wm_read_ich8_word(struct wm_softc *sc, uint32_t index, uint16_t *data)
12317 {
12318 	int32_t status;
12319 	uint32_t word = 0;
12320 
12321 	status = wm_read_ich8_data(sc, index, 2, &word);
12322 	if (status == 0)
12323 		*data = (uint16_t)word;
12324 	else
12325 		*data = 0;
12326 
12327 	return status;
12328 }
12329 
12330 /******************************************************************************
12331  * Reads a dword from the NVM using the ICH8 flash access registers.
12332  *
12333  * sc - pointer to wm_hw structure
12334  * index - The starting byte index of the word to read.
12335  * data - Pointer to a word to store the value read.
12336  *****************************************************************************/
12337 static int32_t
12338 wm_read_ich8_dword(struct wm_softc *sc, uint32_t index, uint32_t *data)
12339 {
12340 	int32_t status;
12341 
12342 	status = wm_read_ich8_data(sc, index, 4, data);
12343 	return status;
12344 }
12345 
12346 /******************************************************************************
12347  * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
12348  * register.
12349  *
12350  * sc - Struct containing variables accessed by shared code
12351  * offset - offset of word in the EEPROM to read
12352  * data - word read from the EEPROM
12353  * words - number of words to read
12354  *****************************************************************************/
12355 static int
12356 wm_nvm_read_ich8(struct wm_softc *sc, int offset, int words, uint16_t *data)
12357 {
12358 	int32_t	 rv = 0;
12359 	uint32_t flash_bank = 0;
12360 	uint32_t act_offset = 0;
12361 	uint32_t bank_offset = 0;
12362 	uint16_t word = 0;
12363 	uint16_t i = 0;
12364 
12365 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12366 		device_xname(sc->sc_dev), __func__));
12367 
12368 	if (sc->nvm.acquire(sc) != 0)
12369 		return -1;
12370 
12371 	/*
12372 	 * We need to know which is the valid flash bank.  In the event
12373 	 * that we didn't allocate eeprom_shadow_ram, we may not be
12374 	 * managing flash_bank. So it cannot be trusted and needs
12375 	 * to be updated with each read.
12376 	 */
12377 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
12378 	if (rv) {
12379 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
12380 			device_xname(sc->sc_dev)));
12381 		flash_bank = 0;
12382 	}
12383 
12384 	/*
12385 	 * Adjust the offset appropriately if we're on bank 1, accounting
12386 	 * for the word size.
12387 	 */
12388 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
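	/*
	 * For example, with flash_bank == 1 and a flash bank size of
	 * 0x1000 words, bank_offset comes out as 0x2000 bytes, so word 0
	 * of bank 1 is read from byte offset 0x2000 of the flash.
	 */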
12389 
12390 	for (i = 0; i < words; i++) {
12391 		/* The NVM part needs a byte offset, hence * 2 */
12392 		act_offset = bank_offset + ((offset + i) * 2);
12393 		rv = wm_read_ich8_word(sc, act_offset, &word);
12394 		if (rv) {
12395 			aprint_error_dev(sc->sc_dev,
12396 			    "%s: failed to read NVM\n", __func__);
12397 			break;
12398 		}
12399 		data[i] = word;
12400 	}
12401 
12402 	sc->nvm.release(sc);
12403 	return rv;
12404 }
12405 
12406 /******************************************************************************
12407  * Reads a 16 bit word or words from the EEPROM using the SPT's flash access
12408  * register.
12409  *
12410  * sc - Struct containing variables accessed by shared code
12411  * offset - offset of word in the EEPROM to read
12412  * words - number of words to read
12413  * data - buffer in which the words read are returned
12414  *****************************************************************************/
12415 static int
12416 wm_nvm_read_spt(struct wm_softc *sc, int offset, int words, uint16_t *data)
12417 {
12418 	int32_t	 rv = 0;
12419 	uint32_t flash_bank = 0;
12420 	uint32_t act_offset = 0;
12421 	uint32_t bank_offset = 0;
12422 	uint32_t dword = 0;
12423 	uint16_t i = 0;
12424 
12425 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12426 		device_xname(sc->sc_dev), __func__));
12427 
12428 	if (sc->nvm.acquire(sc) != 0)
12429 		return -1;
12430 
12431 	/*
12432 	 * We need to know which is the valid flash bank.  In the event
12433 	 * that we didn't allocate eeprom_shadow_ram, we may not be
12434 	 * managing flash_bank. So it cannot be trusted and needs
12435 	 * to be updated with each read.
12436 	 */
12437 	rv = wm_nvm_valid_bank_detect_ich8lan(sc, &flash_bank);
12438 	if (rv) {
12439 		DPRINTF(WM_DEBUG_NVM, ("%s: failed to detect NVM bank\n",
12440 			device_xname(sc->sc_dev)));
12441 		flash_bank = 0;
12442 	}
12443 
12444 	/*
12445 	 * Adjust the offset appropriately if we're on bank 1, accounting
12446 	 * for the word size.
12447 	 */
12448 	bank_offset = flash_bank * (sc->sc_ich8_flash_bank_size * 2);
12449 
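	/*
	 * For example, the word at byte offset 0x12 is fetched by reading
	 * the dword at 0x10 and taking its upper 16 bits, while the word
	 * at byte offset 0x10 uses the lower 16 bits of the same dword.
	 */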
12450 	for (i = 0; i < words; i++) {
12451 		/* The NVM part needs a byte offset, hence * 2 */
12452 		act_offset = bank_offset + ((offset + i) * 2);
12453 		/* but we must read dword aligned, so mask ... */
12454 		rv = wm_read_ich8_dword(sc, act_offset & ~0x3, &dword);
12455 		if (rv) {
12456 			aprint_error_dev(sc->sc_dev,
12457 			    "%s: failed to read NVM\n", __func__);
12458 			break;
12459 		}
12460 		/* ... and pick out low or high word */
12461 		if ((act_offset & 0x2) == 0)
12462 			data[i] = (uint16_t)(dword & 0xFFFF);
12463 		else
12464 			data[i] = (uint16_t)((dword >> 16) & 0xFFFF);
12465 	}
12466 
12467 	sc->nvm.release(sc);
12468 	return rv;
12469 }
12470 
12471 /* iNVM */
12472 
12473 static int
12474 wm_nvm_read_word_invm(struct wm_softc *sc, uint16_t address, uint16_t *data)
12475 {
12476 	int32_t	 rv = -1;	/* -1 = word not found in iNVM */
12477 	uint32_t invm_dword;
12478 	uint16_t i;
12479 	uint8_t record_type, word_address;
12480 
12481 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12482 		device_xname(sc->sc_dev), __func__));
12483 
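	/*
	 * The iNVM is walked as a sequence of 32-bit records: autoload
	 * structures are skipped over by their fixed sizes, and a word
	 * autoload record whose embedded word address matches the
	 * requested one supplies the data.
	 */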
12484 	for (i = 0; i < INVM_SIZE; i++) {
12485 		invm_dword = CSR_READ(sc, WM_INVM_DATA_REG(i));
12486 		/* Get record type */
12487 		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
12488 		if (record_type == INVM_UNINITIALIZED_STRUCTURE)
12489 			break;
12490 		if (record_type == INVM_CSR_AUTOLOAD_STRUCTURE)
12491 			i += INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
12492 		if (record_type == INVM_RSA_KEY_SHA256_STRUCTURE)
12493 			i += INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
12494 		if (record_type == INVM_WORD_AUTOLOAD_STRUCTURE) {
12495 			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
12496 			if (word_address == address) {
12497 				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
12498 				rv = 0;
12499 				break;
12500 			}
12501 		}
12502 	}
12503 
12504 	return rv;
12505 }
12506 
12507 static int
12508 wm_nvm_read_invm(struct wm_softc *sc, int offset, int words, uint16_t *data)
12509 {
12510 	int rv = 0;
12511 	int i;
12512 
12513 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12514 		device_xname(sc->sc_dev), __func__));
12515 
12516 	if (sc->nvm.acquire(sc) != 0)
12517 		return -1;
12518 
12519 	for (i = 0; i < words; i++) {
12520 		switch (offset + i) {
12521 		case NVM_OFF_MACADDR:
12522 		case NVM_OFF_MACADDR1:
12523 		case NVM_OFF_MACADDR2:
12524 			rv = wm_nvm_read_word_invm(sc, offset + i, &data[i]);
12525 			if (rv != 0) {
12526 				data[i] = 0xffff;
12527 				rv = -1;
12528 			}
12529 			break;
12530 		case NVM_OFF_CFG2:
12531 			rv = wm_nvm_read_word_invm(sc, offset, data);
12532 			if (rv != 0) {
12533 				*data = NVM_INIT_CTRL_2_DEFAULT_I211;
12534 				rv = 0;
12535 			}
12536 			break;
12537 		case NVM_OFF_CFG4:
12538 			rv = wm_nvm_read_word_invm(sc, offset, data);
12539 			if (rv != 0) {
12540 				*data = NVM_INIT_CTRL_4_DEFAULT_I211;
12541 				rv = 0;
12542 			}
12543 			break;
12544 		case NVM_OFF_LED_1_CFG:
12545 			rv = wm_nvm_read_word_invm(sc, offset, data);
12546 			if (rv != 0) {
12547 				*data = NVM_LED_1_CFG_DEFAULT_I211;
12548 				rv = 0;
12549 			}
12550 			break;
12551 		case NVM_OFF_LED_0_2_CFG:
12552 			rv = wm_nvm_read_word_invm(sc, offset, data);
12553 			if (rv != 0) {
12554 				*data = NVM_LED_0_2_CFG_DEFAULT_I211;
12555 				rv = 0;
12556 			}
12557 			break;
12558 		case NVM_OFF_ID_LED_SETTINGS:
12559 			rv = wm_nvm_read_word_invm(sc, offset, data);
12560 			if (rv != 0) {
12561 				*data = ID_LED_RESERVED_FFFF;
12562 				rv = 0;
12563 			}
12564 			break;
12565 		default:
12566 			DPRINTF(WM_DEBUG_NVM,
12567 			    ("NVM word 0x%02x is not mapped.\n", offset));
12568 			*data = NVM_RESERVED_WORD;
12569 			break;
12570 		}
12571 	}
12572 
12573 	sc->nvm.release(sc);
12574 	return rv;
12575 }
12576 
12577 /* Locking, NVM type detection, checksum validation, version and read */
12578 
12579 static int
12580 wm_nvm_is_onboard_eeprom(struct wm_softc *sc)
12581 {
12582 	uint32_t eecd = 0;
12583 
12584 	if (sc->sc_type == WM_T_82573 || sc->sc_type == WM_T_82574
12585 	    || sc->sc_type == WM_T_82583) {
12586 		eecd = CSR_READ(sc, WMREG_EECD);
12587 
12588 		/* Isolate bits 15 & 16 */
12589 		eecd = ((eecd >> 15) & 0x03);
12590 
12591 		/* If both bits are set, device is Flash type */
12592 		if (eecd == 0x03)
12593 			return 0;
12594 	}
12595 	return 1;
12596 }
12597 
12598 static int
12599 wm_nvm_flash_presence_i210(struct wm_softc *sc)
12600 {
12601 	uint32_t eec;
12602 
12603 	eec = CSR_READ(sc, WMREG_EEC);
12604 	if ((eec & EEC_FLASH_DETECTED) != 0)
12605 		return 1;
12606 
12607 	return 0;
12608 }
12609 
12610 /*
12611  * wm_nvm_validate_checksum
12612  *
12613  * The checksum is defined as the sum of the first 64 (16 bit) words.
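 * The checksum word itself (at word offset 0x3f) is programmed so that
 * this sum comes out to NVM_CHECKSUM (0xBABA).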
12614  */
12615 static int
12616 wm_nvm_validate_checksum(struct wm_softc *sc)
12617 {
12618 	uint16_t checksum;
12619 	uint16_t eeprom_data;
12620 #ifdef WM_DEBUG
12621 	uint16_t csum_wordaddr, valid_checksum;
12622 #endif
12623 	int i;
12624 
12625 	checksum = 0;
12626 
12627 	/* Don't check for I211 */
12628 	if (sc->sc_type == WM_T_I211)
12629 		return 0;
12630 
12631 #ifdef WM_DEBUG
12632 	if ((sc->sc_type == WM_T_PCH_LPT) || (sc->sc_type == WM_T_PCH_SPT)
12633 	    || (sc->sc_type == WM_T_PCH_CNP)) {
12634 		csum_wordaddr = NVM_OFF_COMPAT;
12635 		valid_checksum = NVM_COMPAT_VALID_CHECKSUM;
12636 	} else {
12637 		csum_wordaddr = NVM_OFF_FUTURE_INIT_WORD1;
12638 		valid_checksum = NVM_FUTURE_INIT_WORD1_VALID_CHECKSUM;
12639 	}
12640 
12641 	/* Dump EEPROM image for debug */
12642 	if ((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
12643 	    || (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
12644 	    || (sc->sc_type == WM_T_PCH2) || (sc->sc_type == WM_T_PCH_LPT)) {
12645 		/* XXX PCH_SPT? */
12646 		wm_nvm_read(sc, csum_wordaddr, 1, &eeprom_data);
12647 		if ((eeprom_data & valid_checksum) == 0) {
12648 			DPRINTF(WM_DEBUG_NVM,
12649 			    ("%s: NVM need to be updated (%04x != %04x)\n",
12650 				device_xname(sc->sc_dev), eeprom_data,
12651 				    valid_checksum));
12652 		}
12653 	}
12654 
12655 	if ((wm_debug & WM_DEBUG_NVM) != 0) {
12656 		printf("%s: NVM dump:\n", device_xname(sc->sc_dev));
12657 		for (i = 0; i < NVM_SIZE; i++) {
12658 			if (wm_nvm_read(sc, i, 1, &eeprom_data))
12659 				printf("XXXX ");
12660 			else
12661 				printf("%04hx ", eeprom_data);
12662 			if (i % 8 == 7)
12663 				printf("\n");
12664 		}
12665 	}
12666 
12667 #endif /* WM_DEBUG */
12668 
12669 	for (i = 0; i < NVM_SIZE; i++) {
12670 		if (wm_nvm_read(sc, i, 1, &eeprom_data))
12671 			return 1;
12672 		checksum += eeprom_data;
12673 	}
12674 
12675 	if (checksum != (uint16_t) NVM_CHECKSUM) {
12676 #ifdef WM_DEBUG
12677 		printf("%s: NVM checksum mismatch (%04x != %04x)\n",
12678 		    device_xname(sc->sc_dev), checksum, NVM_CHECKSUM);
12679 #endif
12680 	}
12681 
12682 	return 0;
12683 }
12684 
12685 static void
12686 wm_nvm_version_invm(struct wm_softc *sc)
12687 {
12688 	uint32_t dword;
12689 
12690 	/*
12691 	 * Linux's code to decode the version is very strange, so we don't
12692 	 * follow that algorithm and just decode word 61 as the document
12693 	 * describes.  Perhaps it's not perfect, though...
12694 	 *
12695 	 * Example:
12696 	 *
12697 	 *   Word61: 00800030 -> Version 0.6 (I211 spec update notes about 0.6)
12698 	 */
12699 	dword = CSR_READ(sc, WM_INVM_DATA_REG(61));
12700 	dword = __SHIFTOUT(dword, INVM_VER_1);
12701 	sc->sc_nvm_ver_major = __SHIFTOUT(dword, INVM_MAJOR);
12702 	sc->sc_nvm_ver_minor = __SHIFTOUT(dword, INVM_MINOR);
12703 }
12704 
12705 static void
12706 wm_nvm_version(struct wm_softc *sc)
12707 {
12708 	uint16_t major, minor, build, patch;
12709 	uint16_t uid0, uid1;
12710 	uint16_t nvm_data;
12711 	uint16_t off;
12712 	bool check_version = false;
12713 	bool check_optionrom = false;
12714 	bool have_build = false;
12715 	bool have_uid = true;
12716 
12717 	/*
12718 	 * Version format:
12719 	 *
12720 	 * XYYZ
12721 	 * X0YZ
12722 	 * X0YY
12723 	 *
12724 	 * Example:
12725 	 *
12726 	 *	82571	0x50a2	5.10.2?	(the spec update notes about 5.6-5.10)
12727 	 *	82571	0x50a6	5.10.6?
12728 	 *	82572	0x506a	5.6.10?
12729 	 *	82572EI	0x5069	5.6.9?
12730 	 *	82574L	0x1080	1.8.0?	(the spec update notes about 2.1.4)
12731 	 *		0x2013	2.1.3?
12732 	 *	82583	0x10a0	1.10.0? (document says it's the default value)
12733 	 */
12734 
12735 	/*
12736 	 * XXX
12737 	 * Qemu's e1000e emulation (82574L) has an SPI ROM of only 64 words.
12738 	 * I've never seen real 82574 hardware with such a small SPI ROM.
12739 	 */
12740 	if ((sc->sc_nvm_wordsize < NVM_OFF_IMAGE_UID1)
12741 	    || (wm_nvm_read(sc, NVM_OFF_IMAGE_UID1, 1, &uid1) != 0))
12742 		have_uid = false;
12743 
12744 	switch (sc->sc_type) {
12745 	case WM_T_82571:
12746 	case WM_T_82572:
12747 	case WM_T_82574:
12748 	case WM_T_82583:
12749 		check_version = true;
12750 		check_optionrom = true;
12751 		have_build = true;
12752 		break;
12753 	case WM_T_82575:
12754 	case WM_T_82576:
12755 	case WM_T_82580:
12756 		if (have_uid && (uid1 & NVM_MAJOR_MASK) != NVM_UID_VALID)
12757 			check_version = true;
12758 		break;
12759 	case WM_T_I211:
12760 		wm_nvm_version_invm(sc);
12761 		have_uid = false;
12762 		goto printver;
12763 	case WM_T_I210:
12764 		if (!wm_nvm_flash_presence_i210(sc)) {
12765 			wm_nvm_version_invm(sc);
12766 			have_uid = false;
12767 			goto printver;
12768 		}
12769 		/* FALLTHROUGH */
12770 	case WM_T_I350:
12771 	case WM_T_I354:
12772 		check_version = true;
12773 		check_optionrom = true;
12774 		break;
12775 	default:
12776 		return;
12777 	}
12778 	if (check_version
12779 	    && (wm_nvm_read(sc, NVM_OFF_VERSION, 1, &nvm_data) == 0)) {
12780 		major = (nvm_data & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
12781 		if (have_build || ((nvm_data & 0x0f00) != 0x0000)) {
12782 			minor = (nvm_data & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
12783 			build = nvm_data & NVM_BUILD_MASK;
12784 			have_build = true;
12785 		} else
12786 			minor = nvm_data & 0x00ff;
12787 
12788 		/* Decimal */
12789 		minor = (minor / 16) * 10 + (minor % 16);
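		/*
		 * For example, nvm_data 0x50a2 decodes as major 5,
		 * minor 0x0a -> 10 and build 2, i.e. version 5.10.2.
		 */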
12790 		sc->sc_nvm_ver_major = major;
12791 		sc->sc_nvm_ver_minor = minor;
12792 
12793 printver:
12794 		aprint_verbose(", version %d.%d", sc->sc_nvm_ver_major,
12795 		    sc->sc_nvm_ver_minor);
12796 		if (have_build) {
12797 			sc->sc_nvm_ver_build = build;
12798 			aprint_verbose(".%d", build);
12799 		}
12800 	}
12801 
12802 	/* Assume the Option ROM area is above NVM_SIZE */
12803 	if ((sc->sc_nvm_wordsize > NVM_SIZE) && check_optionrom
12804 	    && (wm_nvm_read(sc, NVM_OFF_COMB_VER_PTR, 1, &off) == 0)) {
12805 		/* Option ROM Version */
12806 		if ((off != 0x0000) && (off != 0xffff)) {
12807 			int rv;
12808 
12809 			off += NVM_COMBO_VER_OFF;
12810 			rv = wm_nvm_read(sc, off + 1, 1, &uid1);
12811 			rv |= wm_nvm_read(sc, off, 1, &uid0);
12812 			if ((rv == 0) && (uid0 != 0) && (uid0 != 0xffff)
12813 			    && (uid1 != 0) && (uid1 != 0xffff)) {
12814 				/* 16bits */
12815 				major = uid0 >> 8;
12816 				build = (uid0 << 8) | (uid1 >> 8);
12817 				patch = uid1 & 0x00ff;
12818 				aprint_verbose(", option ROM Version %d.%d.%d",
12819 				    major, build, patch);
12820 			}
12821 		}
12822 	}
12823 
12824 	if (have_uid && (wm_nvm_read(sc, NVM_OFF_IMAGE_UID0, 1, &uid0) == 0))
12825 		aprint_verbose(", Image Unique ID %08x", (uid1 << 16) | uid0);
12826 }
12827 
12828 /*
12829  * wm_nvm_read:
12830  *
12831  *	Read data from the serial EEPROM.
12832  */
12833 static int
12834 wm_nvm_read(struct wm_softc *sc, int word, int wordcnt, uint16_t *data)
12835 {
12836 	int rv;
12837 
12838 	DPRINTF(WM_DEBUG_NVM, ("%s: %s called\n",
12839 		device_xname(sc->sc_dev), __func__));
12840 
12841 	if (sc->sc_flags & WM_F_EEPROM_INVALID)
12842 		return -1;
12843 
12844 	rv = sc->nvm.read(sc, word, wordcnt, data);
12845 
12846 	return rv;
12847 }
12848 
12849 /*
12850  * Hardware semaphores.
12851  * Very complex...
12852  */
12853 
12854 static int
12855 wm_get_null(struct wm_softc *sc)
12856 {
12857 
12858 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12859 		device_xname(sc->sc_dev), __func__));
12860 	return 0;
12861 }
12862 
12863 static void
12864 wm_put_null(struct wm_softc *sc)
12865 {
12866 
12867 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12868 		device_xname(sc->sc_dev), __func__));
12869 	return;
12870 }
12871 
12872 static int
12873 wm_get_eecd(struct wm_softc *sc)
12874 {
12875 	uint32_t reg;
12876 	int x;
12877 
12878 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
12879 		device_xname(sc->sc_dev), __func__));
12880 
12881 	reg = CSR_READ(sc, WMREG_EECD);
12882 
12883 	/* Request EEPROM access. */
12884 	reg |= EECD_EE_REQ;
12885 	CSR_WRITE(sc, WMREG_EECD, reg);
12886 
12887 	/* ... and wait for it to be granted. */
12888 	for (x = 0; x < 1000; x++) {
12889 		reg = CSR_READ(sc, WMREG_EECD);
12890 		if (reg & EECD_EE_GNT)
12891 			break;
12892 		delay(5);
12893 	}
12894 	if ((reg & EECD_EE_GNT) == 0) {
12895 		aprint_error_dev(sc->sc_dev,
12896 		    "could not acquire EEPROM GNT\n");
12897 		reg &= ~EECD_EE_REQ;
12898 		CSR_WRITE(sc, WMREG_EECD, reg);
12899 		return -1;
12900 	}
12901 
12902 	return 0;
12903 }
12904 
12905 static void
12906 wm_nvm_eec_clock_raise(struct wm_softc *sc, uint32_t *eecd)
12907 {
12908 
12909 	*eecd |= EECD_SK;
12910 	CSR_WRITE(sc, WMREG_EECD, *eecd);
12911 	CSR_WRITE_FLUSH(sc);
12912 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
12913 		delay(1);
12914 	else
12915 		delay(50);
12916 }
12917 
12918 static void
12919 wm_nvm_eec_clock_lower(struct wm_softc *sc, uint32_t *eecd)
12920 {
12921 
12922 	*eecd &= ~EECD_SK;
12923 	CSR_WRITE(sc, WMREG_EECD, *eecd);
12924 	CSR_WRITE_FLUSH(sc);
12925 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0)
12926 		delay(1);
12927 	else
12928 		delay(50);
12929 }
12930 
12931 static void
12932 wm_put_eecd(struct wm_softc *sc)
12933 {
12934 	uint32_t reg;
12935 
12936 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12937 		device_xname(sc->sc_dev), __func__));
12938 
12939 	/* Stop nvm */
12940 	reg = CSR_READ(sc, WMREG_EECD);
12941 	if ((sc->sc_flags & WM_F_EEPROM_SPI) != 0) {
12942 		/* Pull CS high */
12943 		reg |= EECD_CS;
12944 		wm_nvm_eec_clock_lower(sc, &reg);
12945 	} else {
12946 		/* CS on Microwire is active-high */
12947 		reg &= ~(EECD_CS | EECD_DI);
12948 		CSR_WRITE(sc, WMREG_EECD, reg);
12949 		wm_nvm_eec_clock_raise(sc, &reg);
12950 		wm_nvm_eec_clock_lower(sc, &reg);
12951 	}
12952 
12953 	reg = CSR_READ(sc, WMREG_EECD);
12954 	reg &= ~EECD_EE_REQ;
12955 	CSR_WRITE(sc, WMREG_EECD, reg);
12956 
12957 	return;
12958 }
12959 
12960 /*
12961  * Get hardware semaphore.
12962  * Same as e1000_get_hw_semaphore_generic()
12963  */
12964 static int
12965 wm_get_swsm_semaphore(struct wm_softc *sc)
12966 {
12967 	int32_t timeout;
12968 	uint32_t swsm;
12969 
12970 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
12971 		device_xname(sc->sc_dev), __func__));
12972 	KASSERT(sc->sc_nvm_wordsize > 0);
12973 
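	/*
	 * Two-stage acquire: first wait for the hardware-arbitrated SMBI
	 * bit to clear, then claim the software/firmware semaphore by
	 * setting SWESMBI and reading it back to check that the write
	 * stuck.
	 */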
12974 retry:
12975 	/* Get the SW semaphore. */
12976 	timeout = sc->sc_nvm_wordsize + 1;
12977 	while (timeout) {
12978 		swsm = CSR_READ(sc, WMREG_SWSM);
12979 
12980 		if ((swsm & SWSM_SMBI) == 0)
12981 			break;
12982 
12983 		delay(50);
12984 		timeout--;
12985 	}
12986 
12987 	if (timeout == 0) {
12988 		if ((sc->sc_flags & WM_F_WA_I210_CLSEM) != 0) {
12989 			/*
12990 			 * In rare circumstances, the SW semaphore may already
12991 			 * be held unintentionally. Clear the semaphore once
12992 			 * before giving up.
12993 			 */
12994 			sc->sc_flags &= ~WM_F_WA_I210_CLSEM;
12995 			wm_put_swsm_semaphore(sc);
12996 			goto retry;
12997 		}
12998 		aprint_error_dev(sc->sc_dev,
12999 		    "could not acquire SWSM SMBI\n");
13000 		return 1;
13001 	}
13002 
13003 	/* Get the FW semaphore. */
13004 	timeout = sc->sc_nvm_wordsize + 1;
13005 	while (timeout) {
13006 		swsm = CSR_READ(sc, WMREG_SWSM);
13007 		swsm |= SWSM_SWESMBI;
13008 		CSR_WRITE(sc, WMREG_SWSM, swsm);
13009 		/* If we managed to set the bit we got the semaphore. */
13010 		swsm = CSR_READ(sc, WMREG_SWSM);
13011 		if (swsm & SWSM_SWESMBI)
13012 			break;
13013 
13014 		delay(50);
13015 		timeout--;
13016 	}
13017 
13018 	if (timeout == 0) {
13019 		aprint_error_dev(sc->sc_dev,
13020 		    "could not acquire SWSM SWESMBI\n");
13021 		/* Release semaphores */
13022 		wm_put_swsm_semaphore(sc);
13023 		return 1;
13024 	}
13025 	return 0;
13026 }
13027 
13028 /*
13029  * Put hardware semaphore.
13030  * Same as e1000_put_hw_semaphore_generic()
13031  */
13032 static void
13033 wm_put_swsm_semaphore(struct wm_softc *sc)
13034 {
13035 	uint32_t swsm;
13036 
13037 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13038 		device_xname(sc->sc_dev), __func__));
13039 
13040 	swsm = CSR_READ(sc, WMREG_SWSM);
13041 	swsm &= ~(SWSM_SMBI | SWSM_SWESMBI);
13042 	CSR_WRITE(sc, WMREG_SWSM, swsm);
13043 }
13044 
13045 /*
13046  * Get SW/FW semaphore.
13047  * Same as e1000_acquire_swfw_sync_{80003es2lan,82575}().
13048  */
13049 static int
13050 wm_get_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
13051 {
13052 	uint32_t swfw_sync;
13053 	uint32_t swmask = mask << SWFW_SOFT_SHIFT;
13054 	uint32_t fwmask = mask << SWFW_FIRM_SHIFT;
13055 	int timeout;
13056 
13057 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13058 		device_xname(sc->sc_dev), __func__));
13059 
13060 	if (sc->sc_type == WM_T_80003)
13061 		timeout = 50;
13062 	else
13063 		timeout = 200;
13064 
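	/*
	 * SW_FW_SYNC carries a software bit and a firmware bit for each
	 * resource; our software bit may only be set once both the
	 * software and the firmware copies for this resource are clear.
	 */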
13065 	while (timeout) {
13066 		if (wm_get_swsm_semaphore(sc)) {
13067 			aprint_error_dev(sc->sc_dev,
13068 			    "%s: failed to get semaphore\n",
13069 			    __func__);
13070 			return 1;
13071 		}
13072 		swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
13073 		if ((swfw_sync & (swmask | fwmask)) == 0) {
13074 			swfw_sync |= swmask;
13075 			CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
13076 			wm_put_swsm_semaphore(sc);
13077 			return 0;
13078 		}
13079 		wm_put_swsm_semaphore(sc);
13080 		delay(5000);
13081 		timeout--;
13082 	}
13083 	printf("%s: failed to get swfw semaphore mask 0x%x swfw 0x%x\n",
13084 	    device_xname(sc->sc_dev), mask, swfw_sync);
13085 	return 1;
13086 }
13087 
13088 static void
13089 wm_put_swfw_semaphore(struct wm_softc *sc, uint16_t mask)
13090 {
13091 	uint32_t swfw_sync;
13092 
13093 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13094 		device_xname(sc->sc_dev), __func__));
13095 
13096 	while (wm_get_swsm_semaphore(sc) != 0)
13097 		continue;
13098 
13099 	swfw_sync = CSR_READ(sc, WMREG_SW_FW_SYNC);
13100 	swfw_sync &= ~(mask << SWFW_SOFT_SHIFT);
13101 	CSR_WRITE(sc, WMREG_SW_FW_SYNC, swfw_sync);
13102 
13103 	wm_put_swsm_semaphore(sc);
13104 }
13105 
13106 static int
13107 wm_get_nvm_80003(struct wm_softc *sc)
13108 {
13109 	int rv;
13110 
13111 	DPRINTF(WM_DEBUG_LOCK | WM_DEBUG_NVM, ("%s: %s called\n",
13112 		device_xname(sc->sc_dev), __func__));
13113 
13114 	if ((rv = wm_get_swfw_semaphore(sc, SWFW_EEP_SM)) != 0) {
13115 		aprint_error_dev(sc->sc_dev,
13116 		    "%s: failed to get semaphore(SWFW)\n",
13117 		    __func__);
13118 		return rv;
13119 	}
13120 
13121 	if (((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13122 	    && (rv = wm_get_eecd(sc)) != 0) {
13123 		aprint_error_dev(sc->sc_dev,
13124 		    "%s: failed to get semaphore(EECD)\n",
13125 		    __func__);
13126 		wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
13127 		return rv;
13128 	}
13129 
13130 	return 0;
13131 }
13132 
13133 static void
13134 wm_put_nvm_80003(struct wm_softc *sc)
13135 {
13136 
13137 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13138 		device_xname(sc->sc_dev), __func__));
13139 
13140 	if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13141 		wm_put_eecd(sc);
13142 	wm_put_swfw_semaphore(sc, SWFW_EEP_SM);
13143 }
13144 
13145 static int
13146 wm_get_nvm_82571(struct wm_softc *sc)
13147 {
13148 	int rv;
13149 
13150 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13151 		device_xname(sc->sc_dev), __func__));
13152 
13153 	if ((rv = wm_get_swsm_semaphore(sc)) != 0)
13154 		return rv;
13155 
13156 	switch (sc->sc_type) {
13157 	case WM_T_82573:
13158 		break;
13159 	default:
13160 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13161 			rv = wm_get_eecd(sc);
13162 		break;
13163 	}
13164 
13165 	if (rv != 0) {
13166 		aprint_error_dev(sc->sc_dev,
13167 		    "%s: failed to get semaphore\n",
13168 		    __func__);
13169 		wm_put_swsm_semaphore(sc);
13170 	}
13171 
13172 	return rv;
13173 }
13174 
13175 static void
13176 wm_put_nvm_82571(struct wm_softc *sc)
13177 {
13178 
13179 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13180 		device_xname(sc->sc_dev), __func__));
13181 
13182 	switch (sc->sc_type) {
13183 	case WM_T_82573:
13184 		break;
13185 	default:
13186 		if ((sc->sc_flags & WM_F_LOCK_EECD) != 0)
13187 			wm_put_eecd(sc);
13188 		break;
13189 	}
13190 
13191 	wm_put_swsm_semaphore(sc);
13192 }
13193 
13194 static int
13195 wm_get_phy_82575(struct wm_softc *sc)
13196 {
13197 
13198 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13199 		device_xname(sc->sc_dev), __func__));
13200 	return wm_get_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
13201 }
13202 
13203 static void
13204 wm_put_phy_82575(struct wm_softc *sc)
13205 {
13206 
13207 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13208 		device_xname(sc->sc_dev), __func__));
13209 	return wm_put_swfw_semaphore(sc, swfwphysem[sc->sc_funcid]);
13210 }
13211 
13212 static int
13213 wm_get_swfwhw_semaphore(struct wm_softc *sc)
13214 {
13215 	uint32_t ext_ctrl;
13216 	int timeout = 200;
13217 
13218 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13219 		device_xname(sc->sc_dev), __func__));
13220 
13221 	mutex_enter(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13222 	for (timeout = 0; timeout < 200; timeout++) {
13223 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13224 		ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
13225 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13226 
13227 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13228 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
13229 			return 0;
13230 		delay(5000);
13231 	}
13232 	printf("%s: failed to get swfwhw semaphore ext_ctrl 0x%x\n",
13233 	    device_xname(sc->sc_dev), ext_ctrl);
13234 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13235 	return 1;
13236 }
13237 
13238 static void
13239 wm_put_swfwhw_semaphore(struct wm_softc *sc)
13240 {
13241 	uint32_t ext_ctrl;
13242 
13243 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13244 		device_xname(sc->sc_dev), __func__));
13245 
13246 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13247 	ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13248 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13249 
13250 	mutex_exit(sc->sc_ich_phymtx); /* Use PHY mtx for both PHY and NVM */
13251 }
13252 
13253 static int
13254 wm_get_swflag_ich8lan(struct wm_softc *sc)
13255 {
13256 	uint32_t ext_ctrl;
13257 	int timeout;
13258 
13259 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13260 		device_xname(sc->sc_dev), __func__));
13261 	mutex_enter(sc->sc_ich_phymtx);
13262 	for (timeout = 0; timeout < WM_PHY_CFG_TIMEOUT; timeout++) {
13263 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13264 		if ((ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) == 0)
13265 			break;
13266 		delay(1000);
13267 	}
13268 	if (timeout >= WM_PHY_CFG_TIMEOUT) {
13269 		printf("%s: SW has already locked the resource\n",
13270 		    device_xname(sc->sc_dev));
13271 		goto out;
13272 	}
13273 
13274 	ext_ctrl |= EXTCNFCTR_MDIO_SW_OWNERSHIP;
13275 	CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13276 	for (timeout = 0; timeout < 1000; timeout++) {
13277 		ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13278 		if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP)
13279 			break;
13280 		delay(1000);
13281 	}
13282 	if (timeout >= 1000) {
13283 		printf("%s: failed to acquire semaphore\n",
13284 		    device_xname(sc->sc_dev));
13285 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13286 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13287 		goto out;
13288 	}
13289 	return 0;
13290 
13291 out:
13292 	mutex_exit(sc->sc_ich_phymtx);
13293 	return 1;
13294 }
13295 
13296 static void
13297 wm_put_swflag_ich8lan(struct wm_softc *sc)
13298 {
13299 	uint32_t ext_ctrl;
13300 
13301 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13302 		device_xname(sc->sc_dev), __func__));
13303 	ext_ctrl = CSR_READ(sc, WMREG_EXTCNFCTR);
13304 	if (ext_ctrl & EXTCNFCTR_MDIO_SW_OWNERSHIP) {
13305 		ext_ctrl &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13306 		CSR_WRITE(sc, WMREG_EXTCNFCTR, ext_ctrl);
13307 	} else {
13308 		printf("%s: Semaphore unexpectedly released\n",
13309 		    device_xname(sc->sc_dev));
13310 	}
13311 
13312 	mutex_exit(sc->sc_ich_phymtx);
13313 }
13314 
13315 static int
13316 wm_get_nvm_ich8lan(struct wm_softc *sc)
13317 {
13318 
13319 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13320 		device_xname(sc->sc_dev), __func__));
13321 	mutex_enter(sc->sc_ich_nvmmtx);
13322 
13323 	return 0;
13324 }
13325 
13326 static void
13327 wm_put_nvm_ich8lan(struct wm_softc *sc)
13328 {
13329 
13330 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13331 		device_xname(sc->sc_dev), __func__));
13332 	mutex_exit(sc->sc_ich_nvmmtx);
13333 }
13334 
13335 static int
13336 wm_get_hw_semaphore_82573(struct wm_softc *sc)
13337 {
13338 	int i = 0;
13339 	uint32_t reg;
13340 
13341 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13342 		device_xname(sc->sc_dev), __func__));
13343 
13344 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13345 	do {
13346 		CSR_WRITE(sc, WMREG_EXTCNFCTR,
13347 		    reg | EXTCNFCTR_MDIO_SW_OWNERSHIP);
13348 		reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13349 		if ((reg & EXTCNFCTR_MDIO_SW_OWNERSHIP) != 0)
13350 			break;
13351 		delay(2*1000);
13352 		i++;
13353 	} while (i < WM_MDIO_OWNERSHIP_TIMEOUT);
13354 
13355 	if (i == WM_MDIO_OWNERSHIP_TIMEOUT) {
13356 		wm_put_hw_semaphore_82573(sc);
13357 		log(LOG_ERR, "%s: Driver can't access the PHY\n",
13358 		    device_xname(sc->sc_dev));
13359 		return -1;
13360 	}
13361 
13362 	return 0;
13363 }
13364 
13365 static void
13366 wm_put_hw_semaphore_82573(struct wm_softc *sc)
13367 {
13368 	uint32_t reg;
13369 
13370 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13371 		device_xname(sc->sc_dev), __func__));
13372 
13373 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13374 	reg &= ~EXTCNFCTR_MDIO_SW_OWNERSHIP;
13375 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
13376 }
13377 
13378 /*
13379  * Management mode and power management related subroutines.
13380  * BMC, AMT, suspend/resume and EEE.
13381  */
13382 
13383 #ifdef WM_WOL
13384 static int
13385 wm_check_mng_mode(struct wm_softc *sc)
13386 {
13387 	int rv;
13388 
13389 	switch (sc->sc_type) {
13390 	case WM_T_ICH8:
13391 	case WM_T_ICH9:
13392 	case WM_T_ICH10:
13393 	case WM_T_PCH:
13394 	case WM_T_PCH2:
13395 	case WM_T_PCH_LPT:
13396 	case WM_T_PCH_SPT:
13397 	case WM_T_PCH_CNP:
13398 		rv = wm_check_mng_mode_ich8lan(sc);
13399 		break;
13400 	case WM_T_82574:
13401 	case WM_T_82583:
13402 		rv = wm_check_mng_mode_82574(sc);
13403 		break;
13404 	case WM_T_82571:
13405 	case WM_T_82572:
13406 	case WM_T_82573:
13407 	case WM_T_80003:
13408 		rv = wm_check_mng_mode_generic(sc);
13409 		break;
13410 	default:
13411 		/* nothing to do */
13412 		rv = 0;
13413 		break;
13414 	}
13415 
13416 	return rv;
13417 }
13418 
13419 static int
13420 wm_check_mng_mode_ich8lan(struct wm_softc *sc)
13421 {
13422 	uint32_t fwsm;
13423 
13424 	fwsm = CSR_READ(sc, WMREG_FWSM);
13425 
13426 	if (((fwsm & FWSM_FW_VALID) != 0)
13427 	    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
13428 		return 1;
13429 
13430 	return 0;
13431 }
13432 
13433 static int
13434 wm_check_mng_mode_82574(struct wm_softc *sc)
13435 {
13436 	uint16_t data;
13437 
13438 	wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
13439 
13440 	if ((data & NVM_CFG2_MNGM_MASK) != 0)
13441 		return 1;
13442 
13443 	return 0;
13444 }
13445 
13446 static int
13447 wm_check_mng_mode_generic(struct wm_softc *sc)
13448 {
13449 	uint32_t fwsm;
13450 
13451 	fwsm = CSR_READ(sc, WMREG_FWSM);
13452 
13453 	if (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_IAMT_MODE)
13454 		return 1;
13455 
13456 	return 0;
13457 }
13458 #endif /* WM_WOL */
13459 
13460 static int
13461 wm_enable_mng_pass_thru(struct wm_softc *sc)
13462 {
13463 	uint32_t manc, fwsm, factps;
13464 
13465 	if ((sc->sc_flags & WM_F_ASF_FIRMWARE_PRES) == 0)
13466 		return 0;
13467 
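	/*
	 * Pass-through requires the management controller to be receiving
	 * TCO traffic; the checks below then distinguish a controller in
	 * pass-through mode from one running ASF.
	 */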
13468 	manc = CSR_READ(sc, WMREG_MANC);
13469 
13470 	DPRINTF(WM_DEBUG_MANAGE, ("%s: MANC (%08x)\n",
13471 		device_xname(sc->sc_dev), manc));
13472 	if ((manc & MANC_RECV_TCO_EN) == 0)
13473 		return 0;
13474 
13475 	if ((sc->sc_flags & WM_F_ARC_SUBSYS_VALID) != 0) {
13476 		fwsm = CSR_READ(sc, WMREG_FWSM);
13477 		factps = CSR_READ(sc, WMREG_FACTPS);
13478 		if (((factps & FACTPS_MNGCG) == 0)
13479 		    && (__SHIFTOUT(fwsm, FWSM_MODE) == MNG_ICH_IAMT_MODE))
13480 			return 1;
13481 	} else if ((sc->sc_type == WM_T_82574) || (sc->sc_type == WM_T_82583)){
13482 		uint16_t data;
13483 
13484 		factps = CSR_READ(sc, WMREG_FACTPS);
13485 		wm_nvm_read(sc, NVM_OFF_CFG2, 1, &data);
13486 		DPRINTF(WM_DEBUG_MANAGE, ("%s: FACTPS = %08x, CFG2=%04x\n",
13487 			device_xname(sc->sc_dev), factps, data));
13488 		if (((factps & FACTPS_MNGCG) == 0)
13489 		    && ((data & NVM_CFG2_MNGM_MASK)
13490 			== (NVM_CFG2_MNGM_PT << NVM_CFG2_MNGM_SHIFT)))
13491 			return 1;
13492 	} else if (((manc & MANC_SMBUS_EN) != 0)
13493 	    && ((manc & MANC_ASF_EN) == 0))
13494 		return 1;
13495 
13496 	return 0;
13497 }
13498 
13499 static bool
13500 wm_phy_resetisblocked(struct wm_softc *sc)
13501 {
13502 	bool blocked = false;
13503 	uint32_t reg;
13504 	int i = 0;
13505 
13506 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13507 		device_xname(sc->sc_dev), __func__));
13508 
13509 	switch (sc->sc_type) {
13510 	case WM_T_ICH8:
13511 	case WM_T_ICH9:
13512 	case WM_T_ICH10:
13513 	case WM_T_PCH:
13514 	case WM_T_PCH2:
13515 	case WM_T_PCH_LPT:
13516 	case WM_T_PCH_SPT:
13517 	case WM_T_PCH_CNP:
13518 		do {
13519 			reg = CSR_READ(sc, WMREG_FWSM);
13520 			if ((reg & FWSM_RSPCIPHY) == 0) {
13521 				blocked = true;
13522 				delay(10*1000);
13523 				continue;
13524 			}
13525 			blocked = false;
13526 		} while (blocked && (i++ < 30));
13527 		return blocked;
13528 		break;
13529 	case WM_T_82571:
13530 	case WM_T_82572:
13531 	case WM_T_82573:
13532 	case WM_T_82574:
13533 	case WM_T_82583:
13534 	case WM_T_80003:
13535 		reg = CSR_READ(sc, WMREG_MANC);
13536 		if ((reg & MANC_BLK_PHY_RST_ON_IDE) != 0)
13537 			return true;
13538 		else
13539 			return false;
13540 		break;
13541 	default:
13542 		/* no problem */
13543 		break;
13544 	}
13545 
13546 	return false;
13547 }
13548 
13549 static void
13550 wm_get_hw_control(struct wm_softc *sc)
13551 {
13552 	uint32_t reg;
13553 
13554 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13555 		device_xname(sc->sc_dev), __func__));
13556 
13557 	if (sc->sc_type == WM_T_82573) {
13558 		reg = CSR_READ(sc, WMREG_SWSM);
13559 		CSR_WRITE(sc, WMREG_SWSM, reg | SWSM_DRV_LOAD);
13560 	} else if (sc->sc_type >= WM_T_82571) {
13561 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13562 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg | CTRL_EXT_DRV_LOAD);
13563 	}
13564 }
13565 
13566 static void
13567 wm_release_hw_control(struct wm_softc *sc)
13568 {
13569 	uint32_t reg;
13570 
13571 	DPRINTF(WM_DEBUG_LOCK, ("%s: %s called\n",
13572 		device_xname(sc->sc_dev), __func__));
13573 
13574 	if (sc->sc_type == WM_T_82573) {
13575 		reg = CSR_READ(sc, WMREG_SWSM);
13576 		CSR_WRITE(sc, WMREG_SWSM, reg & ~SWSM_DRV_LOAD);
13577 	} else if (sc->sc_type >= WM_T_82571) {
13578 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13579 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg & ~CTRL_EXT_DRV_LOAD);
13580 	}
13581 }
13582 
13583 static void
13584 wm_gate_hw_phy_config_ich8lan(struct wm_softc *sc, bool gate)
13585 {
13586 	uint32_t reg;
13587 
13588 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13589 		device_xname(sc->sc_dev), __func__));
13590 
13591 	if (sc->sc_type < WM_T_PCH2)
13592 		return;
13593 
13594 	reg = CSR_READ(sc, WMREG_EXTCNFCTR);
13595 
13596 	if (gate)
13597 		reg |= EXTCNFCTR_GATE_PHY_CFG;
13598 	else
13599 		reg &= ~EXTCNFCTR_GATE_PHY_CFG;
13600 
13601 	CSR_WRITE(sc, WMREG_EXTCNFCTR, reg);
13602 }
13603 
13604 static void
13605 wm_smbustopci(struct wm_softc *sc)
13606 {
13607 	uint32_t fwsm, reg;
13608 	int rv = 0;
13609 
13610 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13611 		device_xname(sc->sc_dev), __func__));
13612 
13613 	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
13614 	wm_gate_hw_phy_config_ich8lan(sc, true);
13615 
13616 	/* Disable ULP */
13617 	wm_ulp_disable(sc);
13618 
13619 	/* Acquire PHY semaphore */
13620 	sc->phy.acquire(sc);
13621 
13622 	fwsm = CSR_READ(sc, WMREG_FWSM);
13623 	switch (sc->sc_type) {
13624 	case WM_T_PCH_LPT:
13625 	case WM_T_PCH_SPT:
13626 	case WM_T_PCH_CNP:
13627 		if (wm_phy_is_accessible_pchlan(sc))
13628 			break;
13629 
13630 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13631 		reg |= CTRL_EXT_FORCE_SMBUS;
13632 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13633 #if 0
13634 		/* XXX Isn't this required??? */
13635 		CSR_WRITE_FLUSH(sc);
13636 #endif
13637 		delay(50 * 1000);
13638 		/* FALLTHROUGH */
13639 	case WM_T_PCH2:
13640 		if (wm_phy_is_accessible_pchlan(sc) == true)
13641 			break;
13642 		/* FALLTHROUGH */
13643 	case WM_T_PCH:
13644 		if (sc->sc_type == WM_T_PCH)
13645 			if ((fwsm & FWSM_FW_VALID) != 0)
13646 				break;
13647 
13648 		if (wm_phy_resetisblocked(sc) == true) {
13649 			printf("XXX reset is blocked(3)\n");
13650 			break;
13651 		}
13652 
13653 		wm_toggle_lanphypc_pch_lpt(sc);
13654 
13655 		if (sc->sc_type >= WM_T_PCH_LPT) {
13656 			if (wm_phy_is_accessible_pchlan(sc) == true)
13657 				break;
13658 
13659 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
13660 			reg &= ~CTRL_EXT_FORCE_SMBUS;
13661 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13662 
13663 			if (wm_phy_is_accessible_pchlan(sc) == true)
13664 				break;
13665 			rv = -1;
13666 		}
13667 		break;
13668 	default:
13669 		break;
13670 	}
13671 
13672 	/* Release semaphore */
13673 	sc->phy.release(sc);
13674 
13675 	if (rv == 0) {
13676 		if (wm_phy_resetisblocked(sc)) {
13677 			printf("XXX reset is blocked(4)\n");
13678 			goto out;
13679 		}
13680 		wm_reset_phy(sc);
13681 		if (wm_phy_resetisblocked(sc))
13682 			printf("XXX reset is blocked(4)\n");
13683 	}
13684 
13685 out:
13686 	/*
13687 	 * Ungate automatic PHY configuration by hardware on non-managed 82579
13688 	 */
13689 	if ((sc->sc_type == WM_T_PCH2) && ((fwsm & FWSM_FW_VALID) == 0)) {
13690 		delay(10*1000);
13691 		wm_gate_hw_phy_config_ich8lan(sc, false);
13692 	}
13693 }
13694 
13695 static void
13696 wm_init_manageability(struct wm_softc *sc)
13697 {
13698 
13699 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13700 		device_xname(sc->sc_dev), __func__));
13701 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
13702 		uint32_t manc2h = CSR_READ(sc, WMREG_MANC2H);
13703 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
13704 
13705 		/* Disable hardware interception of ARP */
13706 		manc &= ~MANC_ARP_EN;
13707 
13708 		/* Enable receiving management packets to the host */
13709 		if (sc->sc_type >= WM_T_82571) {
13710 			manc |= MANC_EN_MNG2HOST;
13711 			manc2h |= MANC2H_PORT_623 | MANC2H_PORT_624;
13712 			CSR_WRITE(sc, WMREG_MANC2H, manc2h);
13713 		}
13714 
13715 		CSR_WRITE(sc, WMREG_MANC, manc);
13716 	}
13717 }
13718 
13719 static void
13720 wm_release_manageability(struct wm_softc *sc)
13721 {
13722 
13723 	if (sc->sc_flags & WM_F_HAS_MANAGE) {
13724 		uint32_t manc = CSR_READ(sc, WMREG_MANC);
13725 
13726 		manc |= MANC_ARP_EN;
13727 		if (sc->sc_type >= WM_T_82571)
13728 			manc &= ~MANC_EN_MNG2HOST;
13729 
13730 		CSR_WRITE(sc, WMREG_MANC, manc);
13731 	}
13732 }
13733 
13734 static void
13735 wm_get_wakeup(struct wm_softc *sc)
13736 {
13737 
13738 	/* 0: HAS_AMT, ARC_SUBSYS_VALID, ASF_FIRMWARE_PRES */
13739 	switch (sc->sc_type) {
13740 	case WM_T_82573:
13741 	case WM_T_82583:
13742 		sc->sc_flags |= WM_F_HAS_AMT;
13743 		/* FALLTHROUGH */
13744 	case WM_T_80003:
13745 	case WM_T_82575:
13746 	case WM_T_82576:
13747 	case WM_T_82580:
13748 	case WM_T_I350:
13749 	case WM_T_I354:
13750 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_MODE) != 0)
13751 			sc->sc_flags |= WM_F_ARC_SUBSYS_VALID;
13752 		/* FALLTHROUGH */
13753 	case WM_T_82541:
13754 	case WM_T_82541_2:
13755 	case WM_T_82547:
13756 	case WM_T_82547_2:
13757 	case WM_T_82571:
13758 	case WM_T_82572:
13759 	case WM_T_82574:
13760 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13761 		break;
13762 	case WM_T_ICH8:
13763 	case WM_T_ICH9:
13764 	case WM_T_ICH10:
13765 	case WM_T_PCH:
13766 	case WM_T_PCH2:
13767 	case WM_T_PCH_LPT:
13768 	case WM_T_PCH_SPT:
13769 	case WM_T_PCH_CNP:
13770 		sc->sc_flags |= WM_F_HAS_AMT;
13771 		sc->sc_flags |= WM_F_ASF_FIRMWARE_PRES;
13772 		break;
13773 	default:
13774 		break;
13775 	}
13776 
13777 	/* 1: HAS_MANAGE */
13778 	if (wm_enable_mng_pass_thru(sc) != 0)
13779 		sc->sc_flags |= WM_F_HAS_MANAGE;
13780 
13781 	/*
13782 	 * Note that the WOL flags are set after the EEPROM-related reset
13783 	 * code has run.
13784 	 */
13785 }
13786 
13787 /*
13788  * Unconfigure Ultra Low Power mode.
13789  * Only for I217 and newer (see below).
13790  */
13791 static void
13792 wm_ulp_disable(struct wm_softc *sc)
13793 {
13794 	uint32_t reg;
13795 	int i = 0;
13796 
13797 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13798 		device_xname(sc->sc_dev), __func__));
13799 	/* Exclude old devices */
13800 	if ((sc->sc_type < WM_T_PCH_LPT)
13801 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_LM)
13802 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I217_V)
13803 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_LM2)
13804 	    || (sc->sc_pcidevid == PCI_PRODUCT_INTEL_I218_V2))
13805 		return;
13806 
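	/*
	 * If the ME firmware is running (FWSM_FW_VALID), ask it to undo
	 * ULP mode via H2ME and poll for completion; otherwise the ULP
	 * configuration has to be cleared in the PHY by hand below.
	 */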
13807 	if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) != 0) {
13808 		/* Request ME un-configure ULP mode in the PHY */
13809 		reg = CSR_READ(sc, WMREG_H2ME);
13810 		reg &= ~H2ME_ULP;
13811 		reg |= H2ME_ENFORCE_SETTINGS;
13812 		CSR_WRITE(sc, WMREG_H2ME, reg);
13813 
13814 		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
13815 		while ((CSR_READ(sc, WMREG_FWSM) & FWSM_ULP_CFG_DONE) != 0) {
13816 			if (i++ == 30) {
13817 				printf("%s timed out\n", __func__);
13818 				return;
13819 			}
13820 			delay(10 * 1000);
13821 		}
13822 		reg = CSR_READ(sc, WMREG_H2ME);
13823 		reg &= ~H2ME_ENFORCE_SETTINGS;
13824 		CSR_WRITE(sc, WMREG_H2ME, reg);
13825 
13826 		return;
13827 	}
13828 
13829 	/* Acquire semaphore */
13830 	sc->phy.acquire(sc);
13831 
13832 	/* Toggle LANPHYPC */
13833 	wm_toggle_lanphypc_pch_lpt(sc);
13834 
13835 	/* Unforce SMBus mode in PHY */
13836 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13837 	if (reg == 0x0000 || reg == 0xffff) {
13838 		uint32_t reg2;
13839 
13840 		printf("%s: Force SMBus first.\n", __func__);
13841 		reg2 = CSR_READ(sc, WMREG_CTRL_EXT);
13842 		reg2 |= CTRL_EXT_FORCE_SMBUS;
13843 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg2);
13844 		delay(50 * 1000);
13845 
13846 		reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, CV_SMB_CTRL);
13847 	}
13848 	reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
13849 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, CV_SMB_CTRL, reg);
13850 
13851 	/* Unforce SMBus mode in MAC */
13852 	reg = CSR_READ(sc, WMREG_CTRL_EXT);
13853 	reg &= ~CTRL_EXT_FORCE_SMBUS;
13854 	CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13855 
13856 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, HV_PM_CTRL);
13857 	reg |= HV_PM_CTRL_K1_ENA;
13858 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, HV_PM_CTRL, reg);
13859 
13860 	reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1);
13861 	reg &= ~(I218_ULP_CONFIG1_IND
13862 	    | I218_ULP_CONFIG1_STICKY_ULP
13863 	    | I218_ULP_CONFIG1_RESET_TO_SMBUS
13864 	    | I218_ULP_CONFIG1_WOL_HOST
13865 	    | I218_ULP_CONFIG1_INBAND_EXIT
13866 	    | I218_ULP_CONFIG1_EN_ULP_LANPHYPC
13867 	    | I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST
13868 	    | I218_ULP_CONFIG1_DIS_SMB_PERST);
13869 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13870 	reg |= I218_ULP_CONFIG1_START;
13871 	wm_gmii_hv_writereg_locked(sc->sc_dev, 2, I218_ULP_CONFIG1, reg);
13872 
13873 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
13874 	reg &= ~FEXTNVM7_DIS_SMB_PERST;
13875 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
13876 
13877 	/* Release semaphore */
13878 	sc->phy.release(sc);
13879 	wm_gmii_reset(sc);
13880 	delay(50 * 1000);
13881 }
13882 
13883 /* WOL in the newer chipset interfaces (pchlan) */
13884 static void
13885 wm_enable_phy_wakeup(struct wm_softc *sc)
13886 {
13887 #if 0
13888 	uint16_t preg;
13889 
13890 	/* Copy MAC RARs to PHY RARs */
13891 
13892 	/* Copy MAC MTA to PHY MTA */
13893 
13894 	/* Configure PHY Rx Control register */
13895 
13896 	/* Enable PHY wakeup in MAC register */
13897 
13898 	/* Configure and enable PHY wakeup in PHY registers */
13899 
13900 	/* Activate PHY wakeup */
13901 
13902 	/* XXX */
13903 #endif
13904 }
13905 
13906 /* Power down workaround on D3 */
13907 static void
13908 wm_igp3_phy_powerdown_workaround_ich8lan(struct wm_softc *sc)
13909 {
13910 	uint32_t reg;
13911 	int i;
13912 
13913 	for (i = 0; i < 2; i++) {
13914 		/* Disable link */
13915 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13916 		reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
13917 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13918 
13919 		/*
13920 		 * Call gig speed drop workaround on Gig disable before
13921 		 * accessing any PHY registers
13922 		 */
13923 		if (sc->sc_type == WM_T_ICH8)
13924 			wm_gig_downshift_workaround_ich8lan(sc);
13925 
13926 		/* Write VR power-down enable */
13927 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13928 		reg &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13929 		reg |= IGP3_VR_CTRL_MODE_SHUTDOWN;
13930 		sc->sc_mii.mii_writereg(sc->sc_dev, 1, IGP3_VR_CTRL, reg);
13931 
13932 		/* Read it back and test */
13933 		reg = sc->sc_mii.mii_readreg(sc->sc_dev, 1, IGP3_VR_CTRL);
13934 		reg &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
13935 		if ((reg == IGP3_VR_CTRL_MODE_SHUTDOWN) || (i != 0))
13936 			break;
13937 
13938 		/* Issue PHY reset and repeat at most one more time */
13939 		CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_PHY_RESET);
13940 	}
13941 }
13942 
13943 static void
13944 wm_enable_wakeup(struct wm_softc *sc)
13945 {
13946 	uint32_t reg, pmreg;
13947 	pcireg_t pmode;
13948 
13949 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
13950 		device_xname(sc->sc_dev), __func__));
13951 
13952 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
13953 		&pmreg, NULL) == 0)
13954 		return;
13955 
13956 	/* Advertise the wakeup capability */
13957 	CSR_WRITE(sc, WMREG_CTRL, sc->sc_ctrl | CTRL_SWDPIN(2)
13958 	    | CTRL_SWDPIN(3));
13959 	CSR_WRITE(sc, WMREG_WUC, WUC_APME);
13960 
13961 	/* ICH workaround */
13962 	switch (sc->sc_type) {
13963 	case WM_T_ICH8:
13964 	case WM_T_ICH9:
13965 	case WM_T_ICH10:
13966 	case WM_T_PCH:
13967 	case WM_T_PCH2:
13968 	case WM_T_PCH_LPT:
13969 	case WM_T_PCH_SPT:
13970 	case WM_T_PCH_CNP:
13971 		/* Disable gig during WOL */
13972 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
13973 		reg |= PHY_CTRL_D0A_LPLU | PHY_CTRL_GBE_DIS;
13974 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
13975 		if (sc->sc_type == WM_T_PCH)
13976 			wm_gmii_reset(sc);
13977 
13978 		/* Power down workaround */
13979 		if (sc->sc_phytype == WMPHY_82577) {
13980 			struct mii_softc *child;
13981 
13982 			/* Assume that the PHY is copper */
13983 			child = LIST_FIRST(&sc->sc_mii.mii_phys);
13984 			if ((child != NULL) && (child->mii_mpd_rev <= 2))
13985 				sc->sc_mii.mii_writereg(sc->sc_dev, 1,
13986 				    (768 << 5) | 25, 0x0444); /* magic num */
13987 		}
13988 		break;
13989 	default:
13990 		break;
13991 	}
13992 
13993 	/* Keep the laser running on fiber adapters */
13994 	if ((sc->sc_mediatype == WM_MEDIATYPE_FIBER)
13995 	    || (sc->sc_mediatype == WM_MEDIATYPE_SERDES)) {
13996 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
13997 		reg |= CTRL_EXT_SWDPIN(3);
13998 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
13999 	}
14000 
14001 	reg = CSR_READ(sc, WMREG_WUFC) | WUFC_MAG;
14002 #if 0	/* for the multicast packet */
14003 	reg |= WUFC_MC;
14004 	CSR_WRITE(sc, WMREG_RCTL, CSR_READ(sc, WMREG_RCTL) | RCTL_MPE);
14005 #endif
14006 
14007 	if (sc->sc_type >= WM_T_PCH)
14008 		wm_enable_phy_wakeup(sc);
14009 	else {
14010 		CSR_WRITE(sc, WMREG_WUC, CSR_READ(sc, WMREG_WUC) | WUC_PME_EN);
14011 		CSR_WRITE(sc, WMREG_WUFC, reg);
14012 	}
14013 
14014 	if (((sc->sc_type == WM_T_ICH8) || (sc->sc_type == WM_T_ICH9)
14015 		|| (sc->sc_type == WM_T_ICH10) || (sc->sc_type == WM_T_PCH)
14016 		|| (sc->sc_type == WM_T_PCH2))
14017 	    && (sc->sc_phytype == WMPHY_IGP_3))
14018 		wm_igp3_phy_powerdown_workaround_ich8lan(sc);
14019 
14020 	/* Request PME */
14021 	pmode = pci_conf_read(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR);
14022 #if 0
14023 	/* Disable WOL */
14024 	pmode &= ~(PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN);
14025 #else
14026 	/* For WOL */
14027 	pmode |= PCI_PMCSR_PME_STS | PCI_PMCSR_PME_EN;
14028 #endif
14029 	pci_conf_write(sc->sc_pc, sc->sc_pcitag, pmreg + PCI_PMCSR, pmode);
14030 }
14031 
14032 /* Disable ASPM L0s and/or L1 for workaround */
14033 static void
14034 wm_disable_aspm(struct wm_softc *sc)
14035 {
14036 	pcireg_t reg, mask = 0;
14037 	unsigned const char *str = "";
14038 
14039 	/*
14040 	 * Only for PCIe devices which have the PCIe capability in the PCI
14041 	 * config space.
14042 	 */
14043 	if (((sc->sc_flags & WM_F_PCIE) == 0) || (sc->sc_pcixe_capoff == 0))
14044 		return;
14045 
14046 	switch (sc->sc_type) {
14047 	case WM_T_82571:
14048 	case WM_T_82572:
14049 		/*
14050 		 * 8257[12] Errata 13: Device Does Not Support PCIe Active
14051 		 * State Power management L1 State (ASPM L1).
14052 		 */
14053 		mask = PCIE_LCSR_ASPM_L1;
14054 		str = "L1 is";
14055 		break;
14056 	case WM_T_82573:
14057 	case WM_T_82574:
14058 	case WM_T_82583:
14059 		/*
14060 		 * The 82573 disappears when PCIe ASPM L0s is enabled.
14061 		 *
14062 		 * The 82574 and 82583 do not support PCIe ASPM L0s with
14063 		 * some chipsets.  The documents for those chips say that
14064 		 * disabling L0s on the affected chipsets is sufficient, but
14065 		 * we follow what the Intel em driver does.
14066 		 *
14067 		 * References:
14068 		 * Errata 8 of the Specification Update of i82573.
14069 		 * Errata 20 of the Specification Update of i82574.
14070 		 * Errata 9 of the Specification Update of i82583.
14071 		 */
14072 		mask = PCIE_LCSR_ASPM_L1 | PCIE_LCSR_ASPM_L0S;
14073 		str = "L0s and L1 are";
14074 		break;
14075 	default:
14076 		return;
14077 	}
14078 
14079 	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14080 	    sc->sc_pcixe_capoff + PCIE_LCSR);
14081 	reg &= ~mask;
14082 	pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14083 	    sc->sc_pcixe_capoff + PCIE_LCSR, reg);
14084 
14085 	/* Print only in wm_attach() */
14086 	if ((sc->sc_flags & WM_F_ATTACHED) == 0)
14087 		aprint_verbose_dev(sc->sc_dev,
14088 		    "ASPM %s disabled to workaround the errata.\n", str);
14089 }
14090 
14091 /* LPLU */
14092 
14093 static void
14094 wm_lplu_d0_disable(struct wm_softc *sc)
14095 {
14096 	struct mii_data *mii = &sc->sc_mii;
14097 	uint32_t reg;
14098 
14099 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14100 		device_xname(sc->sc_dev), __func__));
14101 
14102 	if (sc->sc_phytype == WMPHY_IFE)
14103 		return;
14104 
14105 	switch (sc->sc_type) {
14106 	case WM_T_82571:
14107 	case WM_T_82572:
14108 	case WM_T_82573:
14109 	case WM_T_82575:
14110 	case WM_T_82576:
14111 		reg = mii->mii_readreg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT);
14112 		reg &= ~PMR_D0_LPLU;
14113 		mii->mii_writereg(sc->sc_dev, 1, MII_IGPHY_POWER_MGMT, reg);
14114 		break;
14115 	case WM_T_82580:
14116 	case WM_T_I350:
14117 	case WM_T_I210:
14118 	case WM_T_I211:
14119 		reg = CSR_READ(sc, WMREG_PHPM);
14120 		reg &= ~PHPM_D0A_LPLU;
14121 		CSR_WRITE(sc, WMREG_PHPM, reg);
14122 		break;
14123 	case WM_T_82574:
14124 	case WM_T_82583:
14125 	case WM_T_ICH8:
14126 	case WM_T_ICH9:
14127 	case WM_T_ICH10:
14128 		reg = CSR_READ(sc, WMREG_PHY_CTRL);
14129 		reg &= ~(PHY_CTRL_GBE_DIS | PHY_CTRL_D0A_LPLU);
14130 		CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
14131 		CSR_WRITE_FLUSH(sc);
14132 		break;
14133 	case WM_T_PCH:
14134 	case WM_T_PCH2:
14135 	case WM_T_PCH_LPT:
14136 	case WM_T_PCH_SPT:
14137 	case WM_T_PCH_CNP:
14138 		reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_OEM_BITS);
14139 		reg &= ~(HV_OEM_BITS_A1KDIS | HV_OEM_BITS_LPLU);
14140 		if (wm_phy_resetisblocked(sc) == false)
14141 			reg |= HV_OEM_BITS_ANEGNOW;
14142 		wm_gmii_hv_writereg(sc->sc_dev, 1, HV_OEM_BITS, reg);
14143 		break;
14144 	default:
14145 		break;
14146 	}
14147 }
14148 
14149 /* EEE */
14150 
14151 static void
14152 wm_set_eee_i350(struct wm_softc *sc)
14153 {
14154 	uint32_t ipcnfg, eeer;
14155 
14156 	ipcnfg = CSR_READ(sc, WMREG_IPCNFG);
14157 	eeer = CSR_READ(sc, WMREG_EEER);
14158 
14159 	if ((sc->sc_flags & WM_F_EEE) != 0) {
14160 		ipcnfg |= (IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
14161 		eeer |= (EEER_TX_LPI_EN | EEER_RX_LPI_EN
14162 		    | EEER_LPI_FC);
14163 	} else {
14164 		ipcnfg &= ~(IPCNFG_EEE_1G_AN | IPCNFG_EEE_100M_AN);
14165 		ipcnfg &= ~IPCNFG_10BASE_TE;
14166 		eeer &= ~(EEER_TX_LPI_EN | EEER_RX_LPI_EN
14167 		    | EEER_LPI_FC);
14168 	}
14169 
14170 	CSR_WRITE(sc, WMREG_IPCNFG, ipcnfg);
14171 	CSR_WRITE(sc, WMREG_EEER, eeer);
14172 	CSR_READ(sc, WMREG_IPCNFG); /* XXX flush? */
14173 	CSR_READ(sc, WMREG_EEER); /* XXX flush? */
14174 }
14175 
14176 /*
14177  * Workarounds (mainly PHY related).
14178  * Basically, PHY's workarounds are in the PHY drivers.
14179  */
14180 
14181 /* Work-around for 82566 Kumeran PCS lock loss */
14182 static void
14183 wm_kmrn_lock_loss_workaround_ich8lan(struct wm_softc *sc)
14184 {
14185 	struct mii_data *mii = &sc->sc_mii;
14186 	uint32_t status = CSR_READ(sc, WMREG_STATUS);
14187 	int i;
14188 	int reg;
14189 
14190 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14191 		device_xname(sc->sc_dev), __func__));
14192 
14193 	/* If the link is not up, do nothing */
14194 	if ((status & STATUS_LU) == 0)
14195 		return;
14196 
14197 	/* Nothing to do if the link is other than 1Gbps */
14198 	if (__SHIFTOUT(status, STATUS_SPEED) != STATUS_SPEED_1000)
14199 		return;
14200 
14201 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
14202 	for (i = 0; i < 10; i++) {
14203 		/* read twice */
14204 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
14205 		reg = mii->mii_readreg(sc->sc_dev, 1, IGP3_KMRN_DIAG);
14206 		if ((reg & IGP3_KMRN_DIAG_PCS_LOCK_LOSS) == 0)
14207 			goto out;	/* GOOD! */
14208 
14209 		/* Reset the PHY */
14210 		wm_reset_phy(sc);
14211 		delay(5*1000);
14212 	}
14213 
14214 	/* Disable GigE link negotiation */
14215 	reg = CSR_READ(sc, WMREG_PHY_CTRL);
14216 	reg |= PHY_CTRL_GBE_DIS | PHY_CTRL_NOND0A_GBE_DIS;
14217 	CSR_WRITE(sc, WMREG_PHY_CTRL, reg);
14218 
14219 	/*
14220 	 * Call gig speed drop workaround on Gig disable before accessing
14221 	 * any PHY registers.
14222 	 */
14223 	wm_gig_downshift_workaround_ich8lan(sc);
14224 
14225 out:
14226 	return;
14227 }
14228 
14229 /* WOL from S5 stops working */
14230 static void
14231 wm_gig_downshift_workaround_ich8lan(struct wm_softc *sc)
14232 {
14233 	uint16_t kmreg;
14234 
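	/*
	 * Set and then clear Kumeran near-end loopback
	 * (KUMCTRLSTA_DIAG_NELPBK); this is the sequence to perform when
	 * dropping from 1Gb/s so that WOL from S5 keeps working.
	 */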
14235 	/* Only for igp3 */
14236 	if (sc->sc_phytype == WMPHY_IGP_3) {
14237 		if (wm_kmrn_readreg(sc, KUMCTRLSTA_OFFSET_DIAG, &kmreg) != 0)
14238 			return;
14239 		kmreg |= KUMCTRLSTA_DIAG_NELPBK;
14240 		if (wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg) != 0)
14241 			return;
14242 		kmreg &= ~KUMCTRLSTA_DIAG_NELPBK;
14243 		wm_kmrn_writereg(sc, KUMCTRLSTA_OFFSET_DIAG, kmreg);
14244 	}
14245 }
14246 
14247 /*
14248  * Workaround for pch's PHYs
14249  * XXX should be moved to new PHY driver?
14250  */
14251 static void
14252 wm_hv_phy_workaround_ich8lan(struct wm_softc *sc)
14253 {
14254 
14255 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14256 		device_xname(sc->sc_dev), __func__));
14257 	KASSERT(sc->sc_type == WM_T_PCH);
14258 
14259 	if (sc->sc_phytype == WMPHY_82577)
14260 		wm_set_mdio_slow_mode_hv(sc);
14261 
14262 	/* (PCH rev.2) && (82577 && (phy rev 2 or 3)) */
14263 
14264 	/* (82577 && (phy rev 1 or 2)) || (82578 & phy rev 1)*/
14265 
14266 	/* 82578 */
14267 	if (sc->sc_phytype == WMPHY_82578) {
14268 		struct mii_softc *child;
14269 
14270 		/*
14271 		 * Return registers to default by doing a soft reset then
14272 		 * writing 0x3140 to the control register
14273 		 * 0x3140 == BMCR_SPEED0 | BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1
14274 		 */
14275 		child = LIST_FIRST(&sc->sc_mii.mii_phys);
14276 		if ((child != NULL) && (child->mii_mpd_rev < 2)) {
14277 			PHY_RESET(child);
14278 			sc->sc_mii.mii_writereg(sc->sc_dev, 2, MII_BMCR,
14279 			    0x3140);
14280 		}
14281 	}
14282 
14283 	/* Select page 0 */
14284 	sc->phy.acquire(sc);
14285 	wm_gmii_mdic_writereg(sc->sc_dev, 1, MII_IGPHY_PAGE_SELECT, 0);
14286 	sc->phy.release(sc);
14287 
14288 	/*
14289 	 * Configure the K1 Si workaround during phy reset assuming there is
14290 	 * link so that it disables K1 if link is in 1Gbps.
14291 	 */
14292 	wm_k1_gig_workaround_hv(sc, 1);
14293 }
14294 
14295 static void
14296 wm_lv_phy_workaround_ich8lan(struct wm_softc *sc)
14297 {
14298 
14299 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14300 		device_xname(sc->sc_dev), __func__));
14301 	KASSERT(sc->sc_type == WM_T_PCH2);
14302 
14303 	wm_set_mdio_slow_mode_hv(sc);
14304 }
14305 
14306 static int
14307 wm_k1_gig_workaround_hv(struct wm_softc *sc, int link)
14308 {
14309 	int k1_enable = sc->sc_nvm_k1_enabled;
14310 
14311 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14312 		device_xname(sc->sc_dev), __func__));
14313 
14314 	if (sc->phy.acquire(sc) != 0)
14315 		return -1;
14316 
14317 	if (link) {
14318 		k1_enable = 0;
14319 
14320 		/* Link stall fix for link up */
14321 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
14322 		    0x0100);
14323 	} else {
14324 		/* Link stall fix for link down */
14325 		wm_gmii_hv_writereg_locked(sc->sc_dev, 1, IGP3_KMRN_DIAG,
14326 		    0x4100);
14327 	}
14328 
14329 	wm_configure_k1_ich8lan(sc, k1_enable);
14330 	sc->phy.release(sc);
14331 
14332 	return 0;
14333 }
14334 
14335 static void
14336 wm_set_mdio_slow_mode_hv(struct wm_softc *sc)
14337 {
14338 	uint32_t reg;
14339 
14340 	reg = wm_gmii_hv_readreg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL);
14341 	wm_gmii_hv_writereg(sc->sc_dev, 1, HV_KMRN_MODE_CTRL,
14342 	    reg | HV_KMRN_MDIO_SLOW);
14343 }
14344 
14345 static void
14346 wm_configure_k1_ich8lan(struct wm_softc *sc, int k1_enable)
14347 {
14348 	uint32_t ctrl, ctrl_ext, tmp;
14349 	uint16_t kmreg;
14350 	int rv;
14351 
14352 	rv = wm_kmrn_readreg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, &kmreg);
14353 	if (rv != 0)
14354 		return;
14355 
14356 	if (k1_enable)
14357 		kmreg |= KUMCTRLSTA_K1_ENABLE;
14358 	else
14359 		kmreg &= ~KUMCTRLSTA_K1_ENABLE;
14360 
14361 	rv = wm_kmrn_writereg_locked(sc, KUMCTRLSTA_OFFSET_K1_CONFIG, kmreg);
14362 	if (rv != 0)
14363 		return;
14364 
14365 	delay(20);
14366 
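	/*
	 * Bounce the MAC speed settings: temporarily force the speed with
	 * SPD_BYPS set, then restore the original CTRL/CTRL_EXT values,
	 * presumably so the new K1 setting is latched by the hardware.
	 */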
14367 	ctrl = CSR_READ(sc, WMREG_CTRL);
14368 	ctrl_ext = CSR_READ(sc, WMREG_CTRL_EXT);
14369 
14370 	tmp = ctrl & ~(CTRL_SPEED_1000 | CTRL_SPEED_100);
14371 	tmp |= CTRL_FRCSPD;
14372 
14373 	CSR_WRITE(sc, WMREG_CTRL, tmp);
14374 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext | CTRL_EXT_SPD_BYPS);
14375 	CSR_WRITE_FLUSH(sc);
14376 	delay(20);
14377 
14378 	CSR_WRITE(sc, WMREG_CTRL, ctrl);
14379 	CSR_WRITE(sc, WMREG_CTRL_EXT, ctrl_ext);
14380 	CSR_WRITE_FLUSH(sc);
14381 	delay(20);
14384 }
14385 
14386 /* Special case for the 82575: manual init is needed after reset ... */
14387 static void
14388 wm_reset_init_script_82575(struct wm_softc *sc)
14389 {
14390 	/*
14391 	 * Remark: this is untested code - we have no board without an EEPROM.
14392 	 * It is the same setup as mentioned in the FreeBSD driver for the i82575.
14393 	 */
14394 
14395 	/* SerDes configuration via SERDESCTRL */
14396 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x00, 0x0c);
14397 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x01, 0x78);
14398 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x1b, 0x23);
14399 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCTL, 0x23, 0x15);
14400 
14401 	/* CCM configuration via CCMCTL register */
14402 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x14, 0x00);
14403 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_CCMCTL, 0x10, 0x00);
14404 
14405 	/* PCIe lanes configuration */
14406 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x00, 0xec);
14407 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x61, 0xdf);
14408 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x34, 0x05);
14409 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_GIOCTL, 0x2f, 0x81);
14410 
14411 	/* PCIe PLL Configuration */
14412 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x02, 0x47);
14413 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x14, 0x00);
14414 	wm_82575_write_8bit_ctlr_reg(sc, WMREG_SCCTL, 0x10, 0x00);
14415 }
14416 
14417 static void
14418 wm_reset_mdicnfg_82580(struct wm_softc *sc)
14419 {
14420 	uint32_t reg;
14421 	uint16_t nvmword;
14422 	int rv;
14423 
14424 	if (sc->sc_type != WM_T_82580)
14425 		return;
14426 	if ((sc->sc_flags & WM_F_SGMII) == 0)
14427 		return;
14428 
14429 	rv = wm_nvm_read(sc, NVM_OFF_LAN_FUNC_82580(sc->sc_funcid)
14430 	    + NVM_OFF_CFG3_PORTA, 1, &nvmword);
14431 	if (rv != 0) {
14432 		aprint_error_dev(sc->sc_dev, "%s: failed to read NVM\n",
14433 		    __func__);
14434 		return;
14435 	}
14436 
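	/*
	 * Per the NVM word, route MDIO accesses to the external PHY
	 * interface and/or the MDIO lines shared between ports.
	 */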
14437 	reg = CSR_READ(sc, WMREG_MDICNFG);
14438 	if (nvmword & NVM_CFG3_PORTA_EXT_MDIO)
14439 		reg |= MDICNFG_DEST;
14440 	if (nvmword & NVM_CFG3_PORTA_COM_MDIO)
14441 		reg |= MDICNFG_COM_MDIO;
14442 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
14443 }
14444 
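/* All-zeros or all-ones PHY IDs indicate a failed or absent PHY read */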
14445 #define MII_INVALIDID(x)	(((x) == 0x0000) || ((x) == 0xffff))
14446 
14447 static bool
14448 wm_phy_is_accessible_pchlan(struct wm_softc *sc)
14449 {
14450 	int i;
14451 	uint32_t reg;
14452 	uint16_t id1, id2;
14453 
14454 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14455 		device_xname(sc->sc_dev), __func__));
14456 	id1 = id2 = 0xffff;
14457 	for (i = 0; i < 2; i++) {
14458 		id1 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR1);
14459 		if (MII_INVALIDID(id1))
14460 			continue;
14461 		id2 = wm_gmii_hv_readreg_locked(sc->sc_dev, 2, MII_PHYIDR2);
14462 		if (MII_INVALIDID(id2))
14463 			continue;
14464 		break;
14465 	}
14466 	if (!MII_INVALIDID(id1) && !MII_INVALIDID(id2))
14467 		goto out;
14469 
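	/*
	 * In case the PHY needs to be in MDIO slow mode, set slow mode
	 * and try reading the PHY ID again.
	 */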
14470 	if (sc->sc_type < WM_T_PCH_LPT) {
14471 		sc->phy.release(sc);
14472 		wm_set_mdio_slow_mode_hv(sc);
14473 		id1 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR1);
14474 		id2 = wm_gmii_hv_readreg(sc->sc_dev, 2, MII_PHYIDR2);
14475 		sc->phy.acquire(sc);
14476 	}
14477 	if (MII_INVALIDID(id1) || MII_INVALIDID(id2)) {
14478 		device_printf(sc->sc_dev, "%s: failed to read PHY ID\n",
		    __func__);
14479 		return false;
14480 	}
14481 out:
14482 	if (sc->sc_type >= WM_T_PCH_LPT) {
14483 		/* Only unforce SMBus if ME is not active */
14484 		if ((CSR_READ(sc, WMREG_FWSM) & FWSM_FW_VALID) == 0) {
14485 			/* Unforce SMBus mode in PHY */
14486 			reg = wm_gmii_hv_readreg_locked(sc->sc_dev, 2,
14487 			    CV_SMB_CTRL);
14488 			reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
14489 			wm_gmii_hv_writereg_locked(sc->sc_dev, 2,
14490 			    CV_SMB_CTRL, reg);
14491 
14492 			/* Unforce SMBus mode in MAC */
14493 			reg = CSR_READ(sc, WMREG_CTRL_EXT);
14494 			reg &= ~CTRL_EXT_FORCE_SMBUS;
14495 			CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14496 		}
14497 	}
14498 	return true;
14499 }
14500 
14501 static void
14502 wm_toggle_lanphypc_pch_lpt(struct wm_softc *sc)
14503 {
14504 	uint32_t reg;
14505 	int i;
14506 
14507 	/* Set PHY Config Counter to 50msec */
14508 	reg = CSR_READ(sc, WMREG_FEXTNVM3);
14509 	reg &= ~FEXTNVM3_PHY_CFG_COUNTER_MASK;
14510 	reg |= FEXTNVM3_PHY_CFG_COUNTER_50MS;
14511 	CSR_WRITE(sc, WMREG_FEXTNVM3, reg);
14512 
14513 	/* Toggle LANPHYPC */
14514 	reg = CSR_READ(sc, WMREG_CTRL);
14515 	reg |= CTRL_LANPHYPC_OVERRIDE;
14516 	reg &= ~CTRL_LANPHYPC_VALUE;
14517 	CSR_WRITE(sc, WMREG_CTRL, reg);
14518 	CSR_WRITE_FLUSH(sc);
14519 	delay(1000);
14520 	reg &= ~CTRL_LANPHYPC_OVERRIDE;
14521 	CSR_WRITE(sc, WMREG_CTRL, reg);
14522 	CSR_WRITE_FLUSH(sc);
14523 
14524 	if (sc->sc_type < WM_T_PCH_LPT) {
14525 		delay(50 * 1000);
14526 	} else {
14527 		i = 20;
14528 
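		/*
		 * Wait up to ~105 ms (21 polls x 5 ms) for CTRL_EXT_LPCD
		 * to assert, then settle for another 30 ms.
		 */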
14529 		do {
14530 			delay(5 * 1000);
14531 		} while (((CSR_READ(sc, WMREG_CTRL_EXT) & CTRL_EXT_LPCD) == 0)
14532 		    && i--);
14533 
14534 		delay(30 * 1000);
14535 	}
14536 }
14537 
14538 static int
14539 wm_platform_pm_pch_lpt(struct wm_softc *sc, bool link)
14540 {
14541 	uint32_t reg = __SHIFTIN(link, LTRV_NONSNOOP_REQ)
14542 	    | __SHIFTIN(link, LTRV_SNOOP_REQ) | LTRV_SEND;
14543 	uint32_t rxa;
14544 	uint16_t scale = 0, lat_enc = 0;
14545 	int32_t obff_hwm = 0;
14546 	int64_t lat_ns, value;
14547 
14548 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14549 		device_xname(sc->sc_dev), __func__));
14550 
14551 	if (link) {
14552 		uint16_t max_snoop, max_nosnoop, max_ltr_enc;
14553 		uint32_t status;
14554 		uint16_t speed;
14555 		pcireg_t preg;
14556 
14557 		status = CSR_READ(sc, WMREG_STATUS);
14558 		switch (__SHIFTOUT(status, STATUS_SPEED)) {
14559 		case STATUS_SPEED_10:
14560 			speed = 10;
14561 			break;
14562 		case STATUS_SPEED_100:
14563 			speed = 100;
14564 			break;
14565 		case STATUS_SPEED_1000:
14566 			speed = 1000;
14567 			break;
14568 		default:
14569 			device_printf(sc->sc_dev, "Unknown speed "
14570 			    "(status = %08x)\n", status);
14571 			return -1;
14572 		}
14573 
14574 		/* Rx Packet Buffer Allocation size (KB) */
14575 		rxa = CSR_READ(sc, WMREG_PBA) & PBA_RXA_MASK;
14576 
14577 		/*
14578 		 * Determine the maximum latency tolerated by the device.
14579 		 *
14580 		 * Per the PCIe spec, the tolerated latencies are encoded as
14581 		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
14582 		 * a 10-bit value (0-1023) to provide a range from 1 ns to
14583 		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
14584 		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
14585 		 */
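		/*
		 * The worst-case latency below is the time, in ns, to
		 * drain the Rx buffer less two maximum-sized frames:
		 * bits-to-drain * 1000 / speed-in-Mb/s.
		 */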
14586 		lat_ns = ((int64_t)rxa * 1024 -
14587 		    (2 * ((int64_t)sc->sc_ethercom.ec_if.if_mtu
14588 			+ ETHER_HDR_LEN))) * 8 * 1000;
14589 		if (lat_ns < 0)
14590 			lat_ns = 0;
14591 		else
14592 			lat_ns /= speed;
14593 		value = lat_ns;
14594 
14595 		while (value > LTRV_VALUE) {
14596 			scale++;
14597 			value = howmany(value, __BIT(5));
14598 		}
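		/*
		 * Example: lat_ns = 66000 leaves value = 65 and scale = 2
		 * after the loop, i.e. 65 * 2^10 ns = 66560 ns, the closest
		 * representable latency at or above 66000 ns.
		 */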
14599 		if (scale > LTRV_SCALE_MAX) {
14600 			device_printf(sc->sc_dev,
14601 			    "Invalid LTR latency scale %d\n", scale);
14602 			return -1;
14603 		}
14604 		lat_enc = (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
14605 
14606 		/* Determine the maximum latency tolerated by the platform */
14607 		preg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14608 		    WM_PCI_LTR_CAP_LPT);
14609 		max_snoop = preg & 0xffff;
14610 		max_nosnoop = preg >> 16;
14611 
14612 		max_ltr_enc = MAX(max_snoop, max_nosnoop);
14613 
14614 		if (lat_enc > max_ltr_enc) {
14615 			lat_enc = max_ltr_enc;
14616 			lat_ns = __SHIFTOUT(lat_enc, PCI_LTR_MAXSNOOPLAT_VAL)
14617 			    * PCI_LTR_SCALETONS(
14618 				    __SHIFTOUT(lat_enc,
14619 					PCI_LTR_MAXSNOOPLAT_SCALE));
14620 		}
14621 
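		/*
		 * Convert the tolerated latency back into the amount of
		 * buffer (in KB, here taken as 1000 bytes) that drains at
		 * the current speed in that time; the OBFF high water mark
		 * is what remains of the Rx allocation.
		 */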
14622 		if (lat_ns) {
14623 			lat_ns *= speed * 1000;
14624 			lat_ns /= 8;
14625 			lat_ns /= 1000000000;
14626 			obff_hwm = (int32_t)(rxa - lat_ns);
14627 		}
14628 		if ((obff_hwm < 0) || (obff_hwm > SVT_OFF_HWM)) {
14629 			device_printf(sc->sc_dev, "Invalid high water mark %d"
14630 			    " (rxa = %d, lat_ns = %d)\n",
14631 			    obff_hwm, (int32_t)rxa, (int32_t)lat_ns);
14632 			return -1;
14633 		}
14634 	}
14635 	/* Snoop and No-Snoop latencies the same */
14636 	/* Snoop and No-Snoop latencies are set to the same value */
14637 	CSR_WRITE(sc, WMREG_LTRV, reg);
14638 
14639 	/* Set OBFF high water mark */
14640 	reg = CSR_READ(sc, WMREG_SVT) & ~SVT_OFF_HWM;
14641 	reg |= obff_hwm;
14642 	CSR_WRITE(sc, WMREG_SVT, reg);
14643 
14644 	/* Enable OBFF */
14645 	reg = CSR_READ(sc, WMREG_SVCR);
14646 	reg |= SVCR_OFF_EN | SVCR_OFF_MASKINT;
14647 	CSR_WRITE(sc, WMREG_SVCR, reg);
14648 
14649 	return 0;
14650 }
14651 
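#if 0
/*
 * Illustrative sketch only (hypothetical helper, never called): a
 * standalone version of the LTR encoding loop in wm_platform_pm_pch_lpt()
 * above, kept under #if 0 so it is not compiled.  See the worked example
 * at that loop for concrete numbers.
 */
static uint16_t
wm_ltr_encode_example(int64_t lat_ns)
{
	uint16_t scale = 0;
	int64_t value = lat_ns;

	/* Divide by 2^5 (rounding up) until the value fits in 10 bits */
	while (value > LTRV_VALUE) {
		scale++;
		value = howmany(value, __BIT(5));
	}

	/* Combine the 3-bit scale and the 10-bit value */
	return (uint16_t)(__SHIFTIN(scale, LTRV_SCALE) | value);
}
#endif
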
14652 /*
14653  * I210 Errata 25 and I211 Errata 10
14654  * Slow System Clock.
14655  */
14656 static void
14657 wm_pll_workaround_i210(struct wm_softc *sc)
14658 {
14659 	uint32_t mdicnfg, wuc;
14660 	uint32_t reg;
14661 	pcireg_t pcireg;
14662 	uint32_t pmreg;
14663 	uint16_t nvmword, tmp_nvmword;
14664 	int phyval;
14665 	bool wa_done = false;
14666 	int i;
14667 
14668 	/* Save WUC and MDICNFG registers */
14669 	wuc = CSR_READ(sc, WMREG_WUC);
14670 	mdicnfg = CSR_READ(sc, WMREG_MDICNFG);
14671 
14672 	reg = mdicnfg & ~MDICNFG_DEST;
14673 	CSR_WRITE(sc, WMREG_MDICNFG, reg);
14674 
14675 	if (wm_nvm_read(sc, INVM_AUTOLOAD, 1, &nvmword) != 0)
14676 		nvmword = INVM_DEFAULT_AL;
14677 	tmp_nvmword = nvmword | INVM_PLL_WO_VAL;
14678 
14679 	/* Get Power Management cap offset */
14680 	if (pci_get_capability(sc->sc_pc, sc->sc_pcitag, PCI_CAP_PWRMGMT,
14681 		&pmreg, NULL) == 0)
14682 		return;
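	/*
	 * Retry up to WM_MAX_PLL_TRIES times: while the PHY PLL reads
	 * back as unconfigured, reset the internal PHY and bounce the
	 * device through D3hot and back to D0 so the PLL can lock.
	 */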
14683 	for (i = 0; i < WM_MAX_PLL_TRIES; i++) {
14684 		phyval = wm_gmii_gs40g_readreg(sc->sc_dev, 1,
14685 		    GS40G_PHY_PLL_FREQ_PAGE | GS40G_PHY_PLL_FREQ_REG);
14686 
14687 		if ((phyval & GS40G_PHY_PLL_UNCONF) != GS40G_PHY_PLL_UNCONF)
14688 			break; /* The PLL is configured; nothing to do */
14690 
14691 		wa_done = true;
14692 		/* Directly reset the internal PHY */
14693 		reg = CSR_READ(sc, WMREG_CTRL);
14694 		CSR_WRITE(sc, WMREG_CTRL, reg | CTRL_PHY_RESET);
14695 
14696 		reg = CSR_READ(sc, WMREG_CTRL_EXT);
14697 		reg |= CTRL_EXT_PHYPDEN | CTRL_EXT_SDLPE;
14698 		CSR_WRITE(sc, WMREG_CTRL_EXT, reg);
14699 
14700 		CSR_WRITE(sc, WMREG_WUC, 0);
14701 		reg = (INVM_AUTOLOAD << 4) | ((uint32_t)tmp_nvmword << 16);
14702 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
14703 
14704 		pcireg = pci_conf_read(sc->sc_pc, sc->sc_pcitag,
14705 		    pmreg + PCI_PMCSR);
14706 		pcireg |= PCI_PMCSR_STATE_D3;
14707 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14708 		    pmreg + PCI_PMCSR, pcireg);
14709 		delay(1000);
14710 		pcireg &= ~PCI_PMCSR_STATE_D3;
14711 		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
14712 		    pmreg + PCI_PMCSR, pcireg);
14713 
14714 		reg = (INVM_AUTOLOAD << 4) | ((uint32_t)nvmword << 16);
14715 		CSR_WRITE(sc, WMREG_EEARBC_I210, reg);
14716 
14717 		/* Restore WUC register */
14718 		CSR_WRITE(sc, WMREG_WUC, wuc);
14719 	}
14720 
14721 	/* Restore MDICNFG setting */
14722 	CSR_WRITE(sc, WMREG_MDICNFG, mdicnfg);
14723 	if (wa_done)
14724 		aprint_verbose_dev(sc->sc_dev, "I210 workaround done\n");
14725 }
14726 
14727 static void
14728 wm_legacy_irq_quirk_spt(struct wm_softc *sc)
14729 {
14730 	uint32_t reg;
14731 
14732 	DPRINTF(WM_DEBUG_INIT, ("%s: %s called\n",
14733 		device_xname(sc->sc_dev), __func__));
14734 	KASSERT(sc->sc_type == WM_T_PCH_SPT);
14735 
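	/*
	 * Ungate the side clock and disable IOSF sideband clock gating
	 * and clock requests; apparently legacy (INTx) interrupts can be
	 * lost on SPT without this.
	 */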
14736 	reg = CSR_READ(sc, WMREG_FEXTNVM7);
14737 	reg |= FEXTNVM7_SIDE_CLK_UNGATE;
14738 	CSR_WRITE(sc, WMREG_FEXTNVM7, reg);
14739 
14740 	reg = CSR_READ(sc, WMREG_FEXTNVM9);
14741 	reg |= FEXTNVM9_IOSFSB_CLKGATE_DIS | FEXTNVM9_IOSFSB_CLKREQ_DIS;
14742 	CSR_WRITE(sc, WMREG_FEXTNVM9, reg);
14743 }
14744