xref: /netbsd-src/sys/dev/pci/igc/if_igc.c (revision 972ad69cba10dfb4a4ac1c1f00f31906419718dc)
1*972ad69cSmlelstv /*	$NetBSD: if_igc.c,v 1.17 2024/11/24 11:07:03 mlelstv Exp $	*/
2d0d8f2a5Srin /*	$OpenBSD: if_igc.c,v 1.13 2023/04/28 10:18:57 bluhm Exp $	*/
3d0d8f2a5Srin /*-
4d0d8f2a5Srin  * SPDX-License-Identifier: BSD-2-Clause
5d0d8f2a5Srin  *
6d0d8f2a5Srin  * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
7d0d8f2a5Srin  * All rights reserved.
8d0d8f2a5Srin  * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
9d0d8f2a5Srin  *
10d0d8f2a5Srin  * Redistribution and use in source and binary forms, with or without
11d0d8f2a5Srin  * modification, are permitted provided that the following conditions
12d0d8f2a5Srin  * are met:
13d0d8f2a5Srin  * 1. Redistributions of source code must retain the above copyright
14d0d8f2a5Srin  *    notice, this list of conditions and the following disclaimer.
15d0d8f2a5Srin  * 2. Redistributions in binary form must reproduce the above copyright
16d0d8f2a5Srin  *    notice, this list of conditions and the following disclaimer in the
17d0d8f2a5Srin  *    documentation and/or other materials provided with the distribution.
18d0d8f2a5Srin  *
19d0d8f2a5Srin  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20d0d8f2a5Srin  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21d0d8f2a5Srin  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22d0d8f2a5Srin  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23d0d8f2a5Srin  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24d0d8f2a5Srin  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25d0d8f2a5Srin  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26d0d8f2a5Srin  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27d0d8f2a5Srin  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28d0d8f2a5Srin  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29d0d8f2a5Srin  * SUCH DAMAGE.
30d0d8f2a5Srin  */
31d0d8f2a5Srin 
32fb38d839Srin #include <sys/cdefs.h>
33*972ad69cSmlelstv __KERNEL_RCSID(0, "$NetBSD: if_igc.c,v 1.17 2024/11/24 11:07:03 mlelstv Exp $");
34fb38d839Srin 
35fb38d839Srin #ifdef _KERNEL_OPT
36fb38d839Srin #include "opt_if_igc.h"
37fb38d839Srin #if 0 /* notyet */
38d0d8f2a5Srin #include "vlan.h"
39fb38d839Srin #endif
40fb38d839Srin #endif
41d0d8f2a5Srin 
42d0d8f2a5Srin #include <sys/param.h>
43d0d8f2a5Srin #include <sys/systm.h>
44fb38d839Srin #include <sys/bus.h>
45fb38d839Srin #include <sys/cpu.h>
46d0d8f2a5Srin #include <sys/device.h>
47d0d8f2a5Srin #include <sys/endian.h>
48fb38d839Srin #include <sys/intr.h>
49fb38d839Srin #include <sys/interrupt.h>
50fb38d839Srin #include <sys/kernel.h>
51fb38d839Srin #include <sys/kmem.h>
52fb38d839Srin #include <sys/mbuf.h>
53fb38d839Srin #include <sys/mutex.h>
54fb38d839Srin #include <sys/socket.h>
55fb38d839Srin #include <sys/workqueue.h>
56fb38d839Srin #include <sys/xcall.h>
57d0d8f2a5Srin 
58fb38d839Srin #include <net/bpf.h>
59d0d8f2a5Srin #include <net/if.h>
60fb38d839Srin #include <net/if_ether.h>
61d0d8f2a5Srin #include <net/if_media.h>
62fb38d839Srin #include <net/if_vlanvar.h>
63fb38d839Srin #include <net/rss_config.h>
64d0d8f2a5Srin 
65d0d8f2a5Srin #include <netinet/in.h>
66d0d8f2a5Srin #include <netinet/ip.h>
67d0d8f2a5Srin #include <netinet/ip6.h>
68fb38d839Srin #include <netinet/tcp.h>
69d0d8f2a5Srin 
70d0d8f2a5Srin #include <dev/pci/pcivar.h>
71d0d8f2a5Srin #include <dev/pci/pcireg.h>
72d0d8f2a5Srin #include <dev/pci/pcidevs.h>
73d0d8f2a5Srin 
74fb38d839Srin #include <dev/pci/igc/if_igc.h>
75fb38d839Srin #include <dev/pci/igc/igc_evcnt.h>
76fb38d839Srin #include <dev/pci/igc/igc_hw.h>
77fb38d839Srin #include <dev/mii/miivar.h>
78fb38d839Srin 
79fb38d839Srin #define IGC_WORKQUEUE_PRI	PRI_SOFTNET
80fb38d839Srin 
81fb38d839Srin #ifndef IGC_RX_INTR_PROCESS_LIMIT_DEFAULT
82fb38d839Srin #define IGC_RX_INTR_PROCESS_LIMIT_DEFAULT	0
83fb38d839Srin #endif
84fb38d839Srin #ifndef IGC_TX_INTR_PROCESS_LIMIT_DEFAULT
85fb38d839Srin #define IGC_TX_INTR_PROCESS_LIMIT_DEFAULT	0
86fb38d839Srin #endif
87fb38d839Srin 
88fb38d839Srin #ifndef IGC_RX_PROCESS_LIMIT_DEFAULT
89fb38d839Srin #define IGC_RX_PROCESS_LIMIT_DEFAULT		256
90fb38d839Srin #endif
91fb38d839Srin #ifndef IGC_TX_PROCESS_LIMIT_DEFAULT
92fb38d839Srin #define IGC_TX_PROCESS_LIMIT_DEFAULT		256
93fb38d839Srin #endif
94fb38d839Srin 
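/*
 * Store a host-endian value into a little-endian descriptor field.
 * Note: the casts assume the destination pointer is suitably aligned
 * for a 32/64-bit store.
 */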
95fb38d839Srin #define	htolem32(p, x)	(*((uint32_t *)(p)) = htole32(x))
96fb38d839Srin #define	htolem64(p, x)	(*((uint64_t *)(p)) = htole64(x))
97fb38d839Srin 
98fb38d839Srin static const struct igc_product {
99fb38d839Srin 	pci_vendor_id_t		igcp_vendor;
100fb38d839Srin 	pci_product_id_t	igcp_product;
101fb38d839Srin 	const char		*igcp_name;
102fb38d839Srin } igc_products[] = {
103fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT,
104fb38d839Srin 	    "Intel(R) Ethernet Controller I225-IT(2)" },
105fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM,
106fb38d839Srin 	    "Intel(R) Ethernet Controller I226-LM" },
107fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V,
108fb38d839Srin 	    "Intel(R) Ethernet Controller I226-V" },
109fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT,
110fb38d839Srin 	    "Intel(R) Ethernet Controller I226-IT" },
111fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V,
112fb38d839Srin 	    "Intel(R) Ethernet Controller I221-V" },
113fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM,
114fb38d839Srin 	    "Intel(R) Ethernet Controller I226(blankNVM)" },
115fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM,
116fb38d839Srin 	    "Intel(R) Ethernet Controller I225-LM" },
117fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V,
118fb38d839Srin 	    "Intel(R) Ethernet Controller I225-V" },
119fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V,
120fb38d839Srin 	    "Intel(R) Ethernet Controller I220-V" },
121fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I,
122fb38d839Srin 	    "Intel(R) Ethernet Controller I225-I" },
123fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM,
124fb38d839Srin 	    "Intel(R) Ethernet Controller I225(blankNVM)" },
125fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K,
126fb38d839Srin 	    "Intel(R) Ethernet Controller I225-K" },
127fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2,
128fb38d839Srin 	    "Intel(R) Ethernet Controller I225-K(2)" },
129fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K,
130fb38d839Srin 	    "Intel(R) Ethernet Controller I226-K" },
131fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP,
132fb38d839Srin 	    "Intel(R) Ethernet Controller I225-LMvP(2)" },
133fb38d839Srin 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LMVP,
134fb38d839Srin 	    "Intel(R) Ethernet Controller I226-LMvP" },
135fb38d839Srin 	{ 0, 0, NULL },
136d0d8f2a5Srin };
137d0d8f2a5Srin 
138fb38d839Srin #define	IGC_DF_CFG	0x1
139fb38d839Srin #define	IGC_DF_TX	0x2
140fb38d839Srin #define	IGC_DF_RX	0x4
141fb38d839Srin #define	IGC_DF_MISC	0x8
142fb38d839Srin 
143fb38d839Srin #ifdef IGC_DEBUG_FLAGS
144fb38d839Srin int igc_debug_flags = IGC_DEBUG_FLAGS;
145fb38d839Srin #else
146fb38d839Srin int igc_debug_flags = 0;
147fb38d839Srin #endif
148fb38d839Srin 
149fb38d839Srin #define	DPRINTF(flag, fmt, args...)		do {			\
150fb38d839Srin 	if (igc_debug_flags & (IGC_DF_ ## flag))			\
151fb38d839Srin 		printf("%s: %d: " fmt, __func__, __LINE__, ##args);	\
152fb38d839Srin     } while (0)
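/*
 * Usage sketch (hypothetical flag values): building the kernel with
 *	options IGC_DEBUG_FLAGS="(IGC_DF_TX | IGC_DF_RX)"
 * (surfaced via opt_if_igc.h) makes a call such as
 *	DPRINTF(TX, "ring %d full\n", txr->me);
 * print tagged with the function name and line number; with the flag
 * clear, the printf is skipped at run time.
 */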
153fb38d839Srin 
154d0d8f2a5Srin /*********************************************************************
155d0d8f2a5Srin  *  Function Prototypes
156d0d8f2a5Srin  *********************************************************************/
157fb38d839Srin static int	igc_match(device_t, cfdata_t, void *);
158fb38d839Srin static void	igc_attach(device_t, device_t, void *);
159fb38d839Srin static int	igc_detach(device_t, int);
160d0d8f2a5Srin 
161fb38d839Srin static void	igc_identify_hardware(struct igc_softc *);
162fb38d839Srin static int	igc_adjust_nqueues(struct igc_softc *);
163fb38d839Srin static int	igc_allocate_pci_resources(struct igc_softc *);
164fb38d839Srin static int	igc_allocate_interrupts(struct igc_softc *);
165fb38d839Srin static int	igc_allocate_queues(struct igc_softc *);
166fb38d839Srin static void	igc_free_pci_resources(struct igc_softc *);
167fb38d839Srin static void	igc_free_interrupts(struct igc_softc *);
168fb38d839Srin static void	igc_free_queues(struct igc_softc *);
169fb38d839Srin static void	igc_reset(struct igc_softc *);
170fb38d839Srin static void	igc_init_dmac(struct igc_softc *, uint32_t);
171fb38d839Srin static int	igc_setup_interrupts(struct igc_softc *);
172fb38d839Srin static void	igc_attach_counters(struct igc_softc *sc);
173fb38d839Srin static void	igc_detach_counters(struct igc_softc *sc);
174fb38d839Srin static void	igc_update_counters(struct igc_softc *sc);
175fb38d839Srin static void	igc_clear_counters(struct igc_softc *sc);
176fb38d839Srin static int	igc_setup_msix(struct igc_softc *);
177fb38d839Srin static int	igc_setup_msi(struct igc_softc *);
178fb38d839Srin static int	igc_setup_intx(struct igc_softc *);
179fb38d839Srin static int	igc_dma_malloc(struct igc_softc *, bus_size_t,
180fb38d839Srin 		    struct igc_dma_alloc *);
181fb38d839Srin static void	igc_dma_free(struct igc_softc *, struct igc_dma_alloc *);
182fb38d839Srin static void	igc_setup_interface(struct igc_softc *);
183d0d8f2a5Srin 
184fb38d839Srin static int	igc_init(struct ifnet *);
185fb38d839Srin static int	igc_init_locked(struct igc_softc *);
186fb38d839Srin static void	igc_start(struct ifnet *);
187fb38d839Srin static int	igc_transmit(struct ifnet *, struct mbuf *);
188fb38d839Srin static void	igc_tx_common_locked(struct ifnet *, struct tx_ring *, int);
189fb38d839Srin static bool	igc_txeof(struct tx_ring *, u_int);
190fb38d839Srin static void	igc_intr_barrier(struct igc_softc *);
191fb38d839Srin static void	igc_stop(struct ifnet *, int);
192fb38d839Srin static void	igc_stop_locked(struct igc_softc *);
193fb38d839Srin static int	igc_ioctl(struct ifnet *, u_long, void *);
194fb38d839Srin #ifdef IF_RXR
195fb38d839Srin static int	igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
196fb38d839Srin #endif
197fb38d839Srin static void	igc_rxfill(struct rx_ring *);
198fb38d839Srin static void	igc_rxrefill(struct rx_ring *, int);
199fb38d839Srin static bool	igc_rxeof(struct rx_ring *, u_int);
200fb38d839Srin static int	igc_rx_checksum(struct igc_queue *, uint64_t, uint32_t,
201fb38d839Srin 		    uint32_t);
202fb38d839Srin static void	igc_watchdog(struct ifnet *);
203fb38d839Srin static void	igc_tick(void *);
204fb38d839Srin static void	igc_media_status(struct ifnet *, struct ifmediareq *);
205fb38d839Srin static int	igc_media_change(struct ifnet *);
206fb38d839Srin static int	igc_ifflags_cb(struct ethercom *);
207fb38d839Srin static void	igc_set_filter(struct igc_softc *);
208fb38d839Srin static void	igc_update_link_status(struct igc_softc *);
209fb38d839Srin static int	igc_get_buf(struct rx_ring *, int, bool);
210fb38d839Srin static int	igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int,
211fb38d839Srin 		    uint32_t *, uint32_t *);
212fb38d839Srin static int	igc_tso_setup(struct tx_ring *, struct mbuf *, int,
213fb38d839Srin 		    uint32_t *, uint32_t *);
214d0d8f2a5Srin 
215fb38d839Srin static void	igc_configure_queues(struct igc_softc *);
216fb38d839Srin static void	igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int);
217fb38d839Srin static void	igc_enable_queue(struct igc_softc *, uint32_t);
218fb38d839Srin static void	igc_enable_intr(struct igc_softc *);
219fb38d839Srin static void	igc_disable_intr(struct igc_softc *);
220fb38d839Srin static int	igc_intr_link(void *);
221fb38d839Srin static int	igc_intr_queue(void *);
222fb38d839Srin static int	igc_intr(void *);
223fb38d839Srin static void	igc_handle_queue(void *);
224fb38d839Srin static void	igc_handle_queue_work(struct work *, void *);
225fb38d839Srin static void	igc_sched_handle_queue(struct igc_softc *, struct igc_queue *);
226fb38d839Srin static void	igc_barrier_handle_queue(struct igc_softc *);
227d0d8f2a5Srin 
228fb38d839Srin static int	igc_allocate_transmit_buffers(struct tx_ring *);
229fb38d839Srin static int	igc_setup_transmit_structures(struct igc_softc *);
230fb38d839Srin static int	igc_setup_transmit_ring(struct tx_ring *);
231fb38d839Srin static void	igc_initialize_transmit_unit(struct igc_softc *);
232fb38d839Srin static void	igc_free_transmit_structures(struct igc_softc *);
233fb38d839Srin static void	igc_free_transmit_buffers(struct tx_ring *);
234fb38d839Srin static void	igc_withdraw_transmit_packets(struct tx_ring *, bool);
235fb38d839Srin static int	igc_allocate_receive_buffers(struct rx_ring *);
236fb38d839Srin static int	igc_setup_receive_structures(struct igc_softc *);
237fb38d839Srin static int	igc_setup_receive_ring(struct rx_ring *);
238fb38d839Srin static void	igc_initialize_receive_unit(struct igc_softc *);
239fb38d839Srin static void	igc_free_receive_structures(struct igc_softc *);
240fb38d839Srin static void	igc_free_receive_buffers(struct rx_ring *);
241fb38d839Srin static void	igc_clear_receive_status(struct rx_ring *);
242fb38d839Srin static void	igc_initialize_rss_mapping(struct igc_softc *);
243d0d8f2a5Srin 
244fb38d839Srin static void	igc_get_hw_control(struct igc_softc *);
245fb38d839Srin static void	igc_release_hw_control(struct igc_softc *);
246fb38d839Srin static int	igc_is_valid_ether_addr(uint8_t *);
247fb38d839Srin static void	igc_print_devinfo(struct igc_softc *);
248d0d8f2a5Srin 
249fb38d839Srin CFATTACH_DECL3_NEW(igc, sizeof(struct igc_softc),
250fb38d839Srin     igc_match, igc_attach, igc_detach, NULL, NULL, NULL, 0);
251d0d8f2a5Srin 
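/*
 * Ring index helpers: the TX/RX descriptor rings are circular, so the
 * increment/decrement helpers below wrap at the ring size, e.g.
 * igc_txdesc_incr() maps (sc->num_tx_desc - 1) back to 0.
 */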
252fb38d839Srin static inline int
253fb38d839Srin igc_txdesc_incr(struct igc_softc *sc, int id)
254fb38d839Srin {
255d0d8f2a5Srin 
256fb38d839Srin 	if (++id == sc->num_tx_desc)
257fb38d839Srin 		id = 0;
258fb38d839Srin 	return id;
259fb38d839Srin }
260fb38d839Srin 
261fb38d839Srin static inline int __unused
262fb38d839Srin igc_txdesc_decr(struct igc_softc *sc, int id)
263fb38d839Srin {
264fb38d839Srin 
265fb38d839Srin 	if (--id < 0)
266fb38d839Srin 		id = sc->num_tx_desc - 1;
267fb38d839Srin 	return id;
268fb38d839Srin }
269fb38d839Srin 
270fb38d839Srin static inline void
271fb38d839Srin igc_txdesc_sync(struct tx_ring *txr, int id, int ops)
272fb38d839Srin {
273fb38d839Srin 
274fb38d839Srin 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
275fb38d839Srin 	    id * sizeof(union igc_adv_tx_desc), sizeof(union igc_adv_tx_desc),
276fb38d839Srin 	    ops);
277fb38d839Srin }
278fb38d839Srin 
279fb38d839Srin static inline int
280fb38d839Srin igc_rxdesc_incr(struct igc_softc *sc, int id)
281fb38d839Srin {
282fb38d839Srin 
283fb38d839Srin 	if (++id == sc->num_rx_desc)
284fb38d839Srin 		id = 0;
285fb38d839Srin 	return id;
286fb38d839Srin }
287fb38d839Srin 
288fb38d839Srin static inline int
289fb38d839Srin igc_rxdesc_decr(struct igc_softc *sc, int id)
290fb38d839Srin {
291fb38d839Srin 
292fb38d839Srin 	if (--id < 0)
293fb38d839Srin 		id = sc->num_rx_desc - 1;
294fb38d839Srin 	return id;
295fb38d839Srin }
296fb38d839Srin 
297fb38d839Srin static inline void
298fb38d839Srin igc_rxdesc_sync(struct rx_ring *rxr, int id, int ops)
299fb38d839Srin {
300fb38d839Srin 
301fb38d839Srin 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
302fb38d839Srin 	    id * sizeof(union igc_adv_rx_desc), sizeof(union igc_adv_rx_desc),
303fb38d839Srin 	    ops);
304fb38d839Srin }
305fb38d839Srin 
306fb38d839Srin static const struct igc_product *
307fb38d839Srin igc_lookup(const struct pci_attach_args *pa)
308fb38d839Srin {
309fb38d839Srin 	const struct igc_product *igcp;
310fb38d839Srin 
311fb38d839Srin 	for (igcp = igc_products; igcp->igcp_name != NULL; igcp++) {
312fb38d839Srin 		if (PCI_VENDOR(pa->pa_id) == igcp->igcp_vendor &&
313fb38d839Srin 		    PCI_PRODUCT(pa->pa_id) == igcp->igcp_product)
314fb38d839Srin 			return igcp;
315fb38d839Srin 	}
316fb38d839Srin 	return NULL;
317fb38d839Srin }
318d0d8f2a5Srin 
319d0d8f2a5Srin /*********************************************************************
320d0d8f2a5Srin  *  Device identification routine
321d0d8f2a5Srin  *
322d0d8f2a5Srin  *  igc_match determines if the driver should be loaded on an
323d0d8f2a5Srin  *  adapter based on the PCI vendor/device ID of the adapter.
324d0d8f2a5Srin  *
325d0d8f2a5Srin  *  return nonzero on match, 0 otherwise
326d0d8f2a5Srin  *********************************************************************/
327fb38d839Srin static int
328fb38d839Srin igc_match(device_t parent, cfdata_t match, void *aux)
329d0d8f2a5Srin {
330fb38d839Srin 	struct pci_attach_args *pa = aux;
331fb38d839Srin 
332fb38d839Srin 	if (igc_lookup(pa) != NULL)
333fb38d839Srin 		return 1;
334fb38d839Srin 
335fb38d839Srin 	return 0;
336d0d8f2a5Srin }
337d0d8f2a5Srin 
338d0d8f2a5Srin /*********************************************************************
339d0d8f2a5Srin  *  Device initialization routine
340d0d8f2a5Srin  *
341d0d8f2a5Srin  *  The attach entry point is called when the driver is being loaded.
342d0d8f2a5Srin  *  This routine identifies the type of hardware, allocates all resources
343d0d8f2a5Srin  *  and initializes the hardware.
344d0d8f2a5Srin  *
345d0d8f2a5Srin  *  This function does not return a value; failures are logged.
346d0d8f2a5Srin  *********************************************************************/
347fb38d839Srin static void
348fb38d839Srin igc_attach(device_t parent, device_t self, void *aux)
349d0d8f2a5Srin {
350fb38d839Srin 	struct pci_attach_args *pa = aux;
351fb38d839Srin 	struct igc_softc *sc = device_private(self);
352d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
353d0d8f2a5Srin 
354fb38d839Srin 	const struct igc_product *igcp = igc_lookup(pa);
355fb38d839Srin 	KASSERT(igcp != NULL);
356fb38d839Srin 
357fb38d839Srin 	sc->sc_dev = self;
358fb38d839Srin 	callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE);
359fb38d839Srin 	callout_setfunc(&sc->sc_tick_ch, igc_tick, sc);
360fb38d839Srin 	sc->sc_core_stopping = false;
361fb38d839Srin 
362d0d8f2a5Srin 	sc->osdep.os_sc = sc;
363d0d8f2a5Srin 	sc->osdep.os_pa = *pa;
36474973084Srin #ifndef __aarch64__
3652e96519aSrin 	/*
3662e96519aSrin 	 * XXX PR port-arm/57643
3672e96519aSrin 	 * 64-bit DMA does not work at least for LX2K with 32/64GB memory.
3682e96519aSrin 	 * smmu(4) support may be required.
3692e96519aSrin 	 */
37074973084Srin 	if (pci_dma64_available(pa)) {
37174973084Srin 		aprint_verbose(", 64-bit DMA");
37274973084Srin 		sc->osdep.os_dmat = pa->pa_dmat64;
37374973084Srin 	} else
3742e96519aSrin #endif
37574973084Srin 	{
37674973084Srin 		aprint_verbose(", 32-bit DMA");
37774973084Srin 		sc->osdep.os_dmat = pa->pa_dmat;
37874973084Srin 	}
37974973084Srin 
38074973084Srin 	pci_aprint_devinfo_fancy(pa, "Ethernet controller", igcp->igcp_name, 1);
381d0d8f2a5Srin 
382d0d8f2a5Srin 	/* Determine hardware and mac info */
383d0d8f2a5Srin 	igc_identify_hardware(sc);
384d0d8f2a5Srin 
385d0d8f2a5Srin 	sc->num_tx_desc = IGC_DEFAULT_TXD;
386d0d8f2a5Srin 	sc->num_rx_desc = IGC_DEFAULT_RXD;
387d0d8f2a5Srin 
388d0d8f2a5Srin 	/* Setup PCI resources */
389fb38d839Srin 	if (igc_allocate_pci_resources(sc)) {
390fb38d839Srin 		aprint_error_dev(sc->sc_dev,
391fb38d839Srin 		    "unable to allocate PCI resources\n");
392d0d8f2a5Srin 		goto err_pci;
393fb38d839Srin 	}
394fb38d839Srin 
395fb38d839Srin 	if (igc_allocate_interrupts(sc)) {
396fb38d839Srin 		aprint_error_dev(sc->sc_dev, "unable to allocate interrupts\n");
397fb38d839Srin 		goto err_pci;
398fb38d839Srin 	}
399d0d8f2a5Srin 
400d0d8f2a5Srin 	/* Allocate TX/RX queues */
401fb38d839Srin 	if (igc_allocate_queues(sc)) {
402fb38d839Srin 		aprint_error_dev(sc->sc_dev, "unable to allocate queues\n");
403fb38d839Srin 		goto err_alloc_intr;
404fb38d839Srin 	}
405d0d8f2a5Srin 
406d0d8f2a5Srin 	/* Do shared code initialization */
407d0d8f2a5Srin 	if (igc_setup_init_funcs(hw, true)) {
408fb38d839Srin 		aprint_error_dev(sc->sc_dev, "unable to initialize\n");
409fb38d839Srin 		goto err_alloc_intr;
410d0d8f2a5Srin 	}
411d0d8f2a5Srin 
412d0d8f2a5Srin 	hw->mac.autoneg = DO_AUTO_NEG;
413d0d8f2a5Srin 	hw->phy.autoneg_wait_to_complete = false;
414d0d8f2a5Srin 	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
415d0d8f2a5Srin 
416d0d8f2a5Srin 	/* Copper options. */
417d0d8f2a5Srin 	if (hw->phy.media_type == igc_media_type_copper)
418d0d8f2a5Srin 		hw->phy.mdix = AUTO_ALL_MODES;
419d0d8f2a5Srin 
420d0d8f2a5Srin 	/* Set the max frame size. */
421d0d8f2a5Srin 	sc->hw.mac.max_frame_size = 9234;
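	/*
	 * 9234 presumably decomposes as 9216-byte (9K) jumbo payload +
	 * 14-byte Ethernet header + 4-byte FCS.
	 */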
422d0d8f2a5Srin 
423d0d8f2a5Srin 	/* Allocate multicast array memory. */
424fb38d839Srin 	sc->mta = kmem_alloc(IGC_MTA_LEN, KM_SLEEP);
425d0d8f2a5Srin 
426d0d8f2a5Srin 	/* Check SOL/IDER usage. */
427fb38d839Srin 	if (igc_check_reset_block(hw)) {
428fb38d839Srin 		aprint_error_dev(sc->sc_dev,
429fb38d839Srin 		    "PHY reset is blocked due to SOL/IDER session\n");
430fb38d839Srin 	}
431d0d8f2a5Srin 
432d0d8f2a5Srin 	/* Disable Energy Efficient Ethernet. */
433d0d8f2a5Srin 	sc->hw.dev_spec._i225.eee_disable = true;
434d0d8f2a5Srin 
435d0d8f2a5Srin 	igc_reset_hw(hw);
436d0d8f2a5Srin 
437d0d8f2a5Srin 	/* Make sure we have a good EEPROM before we read from it. */
438d0d8f2a5Srin 	if (igc_validate_nvm_checksum(hw) < 0) {
439d0d8f2a5Srin 		/*
440d0d8f2a5Srin 		 * Some PCI-E parts fail the first check due to
441d0d8f2a5Srin 		 * the link being in a sleep state; call it again.
442d0d8f2a5Srin 		 * If it fails a second time, it's a real issue.
443d0d8f2a5Srin 		 */
444d0d8f2a5Srin 		if (igc_validate_nvm_checksum(hw) < 0) {
445fb38d839Srin 			aprint_error_dev(sc->sc_dev,
446fb38d839Srin 			    "EEPROM checksum invalid\n");
447d0d8f2a5Srin 			goto err_late;
448d0d8f2a5Srin 		}
449d0d8f2a5Srin 	}
450d0d8f2a5Srin 
451d0d8f2a5Srin 	/* Copy the permanent MAC address out of the EEPROM. */
452d0d8f2a5Srin 	if (igc_read_mac_addr(hw) < 0) {
453fb38d839Srin 		aprint_error_dev(sc->sc_dev,
454fb38d839Srin 		    "unable to read MAC address from EEPROM\n");
455d0d8f2a5Srin 		goto err_late;
456d0d8f2a5Srin 	}
457d0d8f2a5Srin 
458d0d8f2a5Srin 	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
459fb38d839Srin 		aprint_error_dev(sc->sc_dev, "invalid MAC address\n");
460d0d8f2a5Srin 		goto err_late;
461d0d8f2a5Srin 	}
462d0d8f2a5Srin 
463fb38d839Srin 	if (igc_setup_interrupts(sc))
464d0d8f2a5Srin 		goto err_late;
465d0d8f2a5Srin 
466fb38d839Srin 	/* Attach counters. */
467fb38d839Srin 	igc_attach_counters(sc);
468fb38d839Srin 
469d0d8f2a5Srin 	/* Setup OS specific network interface. */
470d0d8f2a5Srin 	igc_setup_interface(sc);
471d0d8f2a5Srin 
472fb38d839Srin 	igc_print_devinfo(sc);
473fb38d839Srin 
474d0d8f2a5Srin 	igc_reset(sc);
475d0d8f2a5Srin 	hw->mac.get_link_status = true;
476d0d8f2a5Srin 	igc_update_link_status(sc);
477d0d8f2a5Srin 
478d0d8f2a5Srin 	/* The driver can now take control from firmware. */
479d0d8f2a5Srin 	igc_get_hw_control(sc);
480d0d8f2a5Srin 
481fb38d839Srin 	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
482fb38d839Srin 	    ether_sprintf(sc->hw.mac.addr));
483fb38d839Srin 
484fb38d839Srin 	if (pmf_device_register(self, NULL, NULL))
485fb38d839Srin 		pmf_class_network_register(self, &sc->sc_ec.ec_if);
486fb38d839Srin 	else
487fb38d839Srin 		aprint_error_dev(self, "couldn't establish power handler\n");
488fb38d839Srin 
489d0d8f2a5Srin 	return;
490d0d8f2a5Srin 
491d0d8f2a5Srin  err_late:
492d0d8f2a5Srin 	igc_release_hw_control(sc);
493fb38d839Srin  err_alloc_intr:
494fb38d839Srin 	igc_free_interrupts(sc);
495d0d8f2a5Srin  err_pci:
496d0d8f2a5Srin 	igc_free_pci_resources(sc);
497fb38d839Srin 	kmem_free(sc->mta, IGC_MTA_LEN);
498d0d8f2a5Srin }
499d0d8f2a5Srin 
500d0d8f2a5Srin /*********************************************************************
501d0d8f2a5Srin  *  Device removal routine
502d0d8f2a5Srin  *
503d0d8f2a5Srin  *  The detach entry point is called when the driver is being removed.
504d0d8f2a5Srin  *  This routine stops the adapter and deallocates all the resources
505d0d8f2a5Srin  *  that were allocated for driver operation.
506d0d8f2a5Srin  *
507d0d8f2a5Srin  *  return 0 on success, positive on failure
508d0d8f2a5Srin  *********************************************************************/
509fb38d839Srin static int
510fb38d839Srin igc_detach(device_t self, int flags)
511d0d8f2a5Srin {
512fb38d839Srin 	struct igc_softc *sc = device_private(self);
513fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
514d0d8f2a5Srin 
515fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
516fb38d839Srin 	igc_stop_locked(sc);
517fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
518fb38d839Srin 
519fb38d839Srin 	igc_detach_counters(sc);
520fb38d839Srin 
521fb38d839Srin 	igc_free_queues(sc);
522d0d8f2a5Srin 
523d0d8f2a5Srin 	igc_phy_hw_reset(&sc->hw);
524d0d8f2a5Srin 	igc_release_hw_control(sc);
525d0d8f2a5Srin 
526d0d8f2a5Srin 	ether_ifdetach(ifp);
527d0d8f2a5Srin 	if_detach(ifp);
528fb38d839Srin 	ifmedia_fini(&sc->media);
529d0d8f2a5Srin 
530fb38d839Srin 	igc_free_interrupts(sc);
531d0d8f2a5Srin 	igc_free_pci_resources(sc);
532fb38d839Srin 	kmem_free(sc->mta, IGC_MTA_LEN);
533d0d8f2a5Srin 
534fb38d839Srin 	mutex_destroy(&sc->sc_core_lock);
535d0d8f2a5Srin 
536d0d8f2a5Srin 	return 0;
537d0d8f2a5Srin }
538d0d8f2a5Srin 
539fb38d839Srin static void
540d0d8f2a5Srin igc_identify_hardware(struct igc_softc *sc)
541d0d8f2a5Srin {
542d0d8f2a5Srin 	struct igc_osdep *os = &sc->osdep;
543d0d8f2a5Srin 	struct pci_attach_args *pa = &os->os_pa;
544d0d8f2a5Srin 
545d0d8f2a5Srin 	/* Save off the information about this board. */
546d0d8f2a5Srin 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
547d0d8f2a5Srin 
548d0d8f2a5Srin 	/* Do shared code init and setup. */
549d0d8f2a5Srin 	if (igc_set_mac_type(&sc->hw)) {
550fb38d839Srin 		aprint_error_dev(sc->sc_dev, "unable to identify hardware\n");
551d0d8f2a5Srin 		return;
552d0d8f2a5Srin 	}
553d0d8f2a5Srin }
554d0d8f2a5Srin 
555fb38d839Srin static int
556d0d8f2a5Srin igc_allocate_pci_resources(struct igc_softc *sc)
557d0d8f2a5Srin {
558d0d8f2a5Srin 	struct igc_osdep *os = &sc->osdep;
559d0d8f2a5Srin 	struct pci_attach_args *pa = &os->os_pa;
560d0d8f2a5Srin 
561fb38d839Srin 	/*
562fb38d839Srin 	 * Make sure bus mastering and memory-mapped I/O are enabled.
563fb38d839Srin 	 */
564fb38d839Srin 	pcireg_t csr =
565fb38d839Srin 	    pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
566fb38d839Srin 	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE;
567fb38d839Srin 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
568fb38d839Srin 
569fb38d839Srin 	const pcireg_t memtype =
570fb38d839Srin 	    pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG);
571d0d8f2a5Srin 	if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt,
572fb38d839Srin 	    &os->os_memh, &os->os_membase, &os->os_memsize)) {
573fb38d839Srin 		aprint_error_dev(sc->sc_dev, "unable to map registers\n");
574d0d8f2a5Srin 		return ENXIO;
575d0d8f2a5Srin 	}
576d0d8f2a5Srin 
577fb38d839Srin 	sc->hw.hw_addr = os->os_membase;
578fb38d839Srin 	sc->hw.back = os;
579d0d8f2a5Srin 
580d0d8f2a5Srin 	return 0;
581d0d8f2a5Srin }
582d0d8f2a5Srin 
583fb38d839Srin static int __unused
584fb38d839Srin igc_adjust_nqueues(struct igc_softc *sc)
585fb38d839Srin {
586fb38d839Srin 	struct pci_attach_args *pa = &sc->osdep.os_pa;
587fb38d839Srin 	int nqueues = MIN(IGC_MAX_NQUEUES, ncpu);
588fb38d839Srin 
589fb38d839Srin 	const int nmsix = pci_msix_count(pa->pa_pc, pa->pa_tag);
590fb38d839Srin 	if (nmsix <= 1)
591fb38d839Srin 		nqueues = 1;
592fb38d839Srin 	else if (nmsix < nqueues + 1)
593fb38d839Srin 		nqueues = nmsix - 1;
594fb38d839Srin 
595fb38d839Srin 	return nqueues;
596fb38d839Srin }
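/*
 * Example (assuming IGC_MAX_NQUEUES >= 4): on a 4-CPU system with 8
 * MSI-X vectors available this returns 4, one queue per CPU; with only
 * 3 vectors it returns 2, keeping one vector free for the link
 * interrupt that igc_allocate_interrupts() adds on top.
 */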
597fb38d839Srin 
598fb38d839Srin static int
599fb38d839Srin igc_allocate_interrupts(struct igc_softc *sc)
600fb38d839Srin {
601fb38d839Srin 	struct pci_attach_args *pa = &sc->osdep.os_pa;
602fb38d839Srin 	int error;
603fb38d839Srin 
604fb38d839Srin #ifndef IGC_DISABLE_MSIX
605fb38d839Srin 	const int nqueues = igc_adjust_nqueues(sc);
606fb38d839Srin 	if (nqueues > 1) {
607fb38d839Srin 		sc->sc_nintrs = nqueues + 1;
608fb38d839Srin 		error = pci_msix_alloc_exact(pa, &sc->sc_intrs, sc->sc_nintrs);
609fb38d839Srin 		if (!error) {
610fb38d839Srin 			sc->sc_nqueues = nqueues;
611fb38d839Srin 			sc->sc_intr_type = PCI_INTR_TYPE_MSIX;
612fb38d839Srin 			return 0;
613fb38d839Srin 		}
614fb38d839Srin 	}
615fb38d839Srin #endif
616fb38d839Srin 
617fb38d839Srin 	/* fallback to MSI */
618fb38d839Srin 	sc->sc_nintrs = sc->sc_nqueues = 1;
619fb38d839Srin 
620fb38d839Srin #ifndef IGC_DISABLE_MSI
621fb38d839Srin 	error = pci_msi_alloc_exact(pa, &sc->sc_intrs, sc->sc_nintrs);
622fb38d839Srin 	if (!error) {
623fb38d839Srin 		sc->sc_intr_type = PCI_INTR_TYPE_MSI;
624fb38d839Srin 		return 0;
625fb38d839Srin 	}
626fb38d839Srin #endif
627fb38d839Srin 
628fb38d839Srin 	/* fallback to INTx */
629fb38d839Srin 
630fb38d839Srin 	error = pci_intx_alloc(pa, &sc->sc_intrs);
631fb38d839Srin 	if (!error) {
632fb38d839Srin 		sc->sc_intr_type = PCI_INTR_TYPE_INTX;
633fb38d839Srin 		return 0;
634fb38d839Srin 	}
635fb38d839Srin 
636fb38d839Srin 	return error;
637fb38d839Srin }
638fb38d839Srin 
639fb38d839Srin static int
640d0d8f2a5Srin igc_allocate_queues(struct igc_softc *sc)
641d0d8f2a5Srin {
642fb38d839Srin 	device_t dev = sc->sc_dev;
643fb38d839Srin 	int rxconf = 0, txconf = 0;
644d0d8f2a5Srin 
645d0d8f2a5Srin 	/* Allocate the top level queue structs. */
646fb38d839Srin 	sc->queues =
647fb38d839Srin 	    kmem_zalloc(sc->sc_nqueues * sizeof(struct igc_queue), KM_SLEEP);
648d0d8f2a5Srin 
649d0d8f2a5Srin 	/* Allocate the TX ring. */
650fb38d839Srin 	sc->tx_rings =
651fb38d839Srin 	    kmem_zalloc(sc->sc_nqueues * sizeof(struct tx_ring), KM_SLEEP);
652d0d8f2a5Srin 
653d0d8f2a5Srin 	/* Allocate the RX ring. */
654fb38d839Srin 	sc->rx_rings =
655fb38d839Srin 	    kmem_zalloc(sc->sc_nqueues * sizeof(struct rx_ring), KM_SLEEP);
656d0d8f2a5Srin 
657d0d8f2a5Srin 	/* Set up the TX queues. */
658fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++, txconf++) {
659fb38d839Srin 		struct tx_ring *txr = &sc->tx_rings[iq];
660fb38d839Srin 		const int tsize = roundup2(
661fb38d839Srin 		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
662d0d8f2a5Srin 		    IGC_DBA_ALIGN);
663d0d8f2a5Srin 
664fb38d839Srin 		txr->sc = sc;
665fb38d839Srin 		txr->txr_igcq = &sc->queues[iq];
666fb38d839Srin 		txr->me = iq;
667d0d8f2a5Srin 		if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
668fb38d839Srin 			aprint_error_dev(dev,
669fb38d839Srin 			    "unable to allocate TX descriptor\n");
670fb38d839Srin 			goto fail;
671d0d8f2a5Srin 		}
672d0d8f2a5Srin 		txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
673fb38d839Srin 		memset(txr->tx_base, 0, tsize);
674fb38d839Srin 	}
675fb38d839Srin 
676fb38d839Srin 	/* Prepare transmit descriptors and buffers. */
677fb38d839Srin 	if (igc_setup_transmit_structures(sc)) {
678fb38d839Srin 		aprint_error_dev(dev, "unable to setup transmit structures\n");
679fb38d839Srin 		goto fail;
680d0d8f2a5Srin 	}
681d0d8f2a5Srin 
682d0d8f2a5Srin 	/* Set up the RX queues. */
683fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++, rxconf++) {
684fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
685fb38d839Srin 		const int rsize = roundup2(
686fb38d839Srin 		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
687d0d8f2a5Srin 		    IGC_DBA_ALIGN);
688d0d8f2a5Srin 
689fb38d839Srin 		rxr->sc = sc;
690fb38d839Srin 		rxr->rxr_igcq = &sc->queues[iq];
691fb38d839Srin 		rxr->me = iq;
692fb38d839Srin #ifdef OPENBSD
693fb38d839Srin 		timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);
694fb38d839Srin #endif
695d0d8f2a5Srin 		if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
696fb38d839Srin 			aprint_error_dev(dev,
697fb38d839Srin 			    "unable to allocate RX descriptor\n");
698fb38d839Srin 			goto fail;
699d0d8f2a5Srin 		}
700d0d8f2a5Srin 		rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
701fb38d839Srin 		memset(rxr->rx_base, 0, rsize);
702fb38d839Srin 	}
703fb38d839Srin 
704fb38d839Srin 	sc->rx_mbuf_sz = MCLBYTES;
705fb38d839Srin 	/* Prepare receive descriptors and buffers. */
706fb38d839Srin 	if (igc_setup_receive_structures(sc)) {
707fb38d839Srin 		aprint_error_dev(sc->sc_dev,
708fb38d839Srin 		    "unable to setup receive structures\n");
709fb38d839Srin 		goto fail;
710d0d8f2a5Srin 	}
711d0d8f2a5Srin 
712d0d8f2a5Srin 	/* Set up the queue holding structs. */
713fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
714fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
715fb38d839Srin 
716fb38d839Srin 		q->sc = sc;
717fb38d839Srin 		q->txr = &sc->tx_rings[iq];
718fb38d839Srin 		q->rxr = &sc->rx_rings[iq];
719d0d8f2a5Srin 	}
720d0d8f2a5Srin 
721d0d8f2a5Srin 	return 0;
722d0d8f2a5Srin 
723d0d8f2a5Srin  fail:
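	/*
	 * Unwind in reverse order of allocation: txconf/rxconf count how
	 * many rings had their descriptor DMA memory allocated before
	 * the failure.
	 */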
724fb38d839Srin 	for (struct rx_ring *rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
725fb38d839Srin 		igc_dma_free(sc, &rxr->rxdma);
726fb38d839Srin 	for (struct tx_ring *txr = sc->tx_rings; txconf > 0; txr++, txconf--)
727fb38d839Srin 		igc_dma_free(sc, &txr->txdma);
728fb38d839Srin 
729fb38d839Srin 	kmem_free(sc->rx_rings, sc->sc_nqueues * sizeof(struct rx_ring));
730fb38d839Srin 	sc->rx_rings = NULL;
731fb38d839Srin 	kmem_free(sc->tx_rings, sc->sc_nqueues * sizeof(struct tx_ring));
732fb38d839Srin 	sc->tx_rings = NULL;
733fb38d839Srin 	kmem_free(sc->queues, sc->sc_nqueues * sizeof(struct igc_queue));
734fb38d839Srin 	sc->queues = NULL;
735fb38d839Srin 
736d0d8f2a5Srin 	return ENOMEM;
737d0d8f2a5Srin }
738d0d8f2a5Srin 
739fb38d839Srin static void
740d0d8f2a5Srin igc_free_pci_resources(struct igc_softc *sc)
741d0d8f2a5Srin {
742d0d8f2a5Srin 	struct igc_osdep *os = &sc->osdep;
743d0d8f2a5Srin 
744d0d8f2a5Srin 	if (os->os_membase != 0)
745d0d8f2a5Srin 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
746d0d8f2a5Srin 	os->os_membase = 0;
747d0d8f2a5Srin }
748d0d8f2a5Srin 
749fb38d839Srin static void
750fb38d839Srin igc_free_interrupts(struct igc_softc *sc)
751fb38d839Srin {
752fb38d839Srin 	struct pci_attach_args *pa = &sc->osdep.os_pa;
753fb38d839Srin 	pci_chipset_tag_t pc = pa->pa_pc;
754fb38d839Srin 
755fb38d839Srin 	for (int i = 0; i < sc->sc_nintrs; i++) {
756fb38d839Srin 		if (sc->sc_ihs[i] != NULL) {
757fb38d839Srin 			pci_intr_disestablish(pc, sc->sc_ihs[i]);
758fb38d839Srin 			sc->sc_ihs[i] = NULL;
759fb38d839Srin 		}
760fb38d839Srin 	}
761fb38d839Srin 	pci_intr_release(pc, sc->sc_intrs, sc->sc_nintrs);
762fb38d839Srin }
763fb38d839Srin 
764fb38d839Srin static void
765fb38d839Srin igc_free_queues(struct igc_softc *sc)
766fb38d839Srin {
767fb38d839Srin 
768fb38d839Srin 	igc_free_receive_structures(sc);
769fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
770fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
771fb38d839Srin 
772fb38d839Srin 		igc_dma_free(sc, &rxr->rxdma);
773fb38d839Srin 	}
774fb38d839Srin 
775fb38d839Srin 	igc_free_transmit_structures(sc);
776fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
777fb38d839Srin 		struct tx_ring *txr = &sc->tx_rings[iq];
778fb38d839Srin 
779fb38d839Srin 		igc_dma_free(sc, &txr->txdma);
780fb38d839Srin 	}
781fb38d839Srin 
782fb38d839Srin 	kmem_free(sc->rx_rings, sc->sc_nqueues * sizeof(struct rx_ring));
783fb38d839Srin 	kmem_free(sc->tx_rings, sc->sc_nqueues * sizeof(struct tx_ring));
784fb38d839Srin 	kmem_free(sc->queues, sc->sc_nqueues * sizeof(struct igc_queue));
785fb38d839Srin }
786fb38d839Srin 
787d0d8f2a5Srin /*********************************************************************
788d0d8f2a5Srin  *
789d0d8f2a5Srin  *  Initialize the hardware to a configuration as specified by the
790d0d8f2a5Srin  *  adapter structure.
791d0d8f2a5Srin  *
792d0d8f2a5Srin  **********************************************************************/
793fb38d839Srin static void
794d0d8f2a5Srin igc_reset(struct igc_softc *sc)
795d0d8f2a5Srin {
796d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
797d0d8f2a5Srin 
798d0d8f2a5Srin 	/* Let the firmware know the OS is in control */
799d0d8f2a5Srin 	igc_get_hw_control(sc);
800d0d8f2a5Srin 
801d0d8f2a5Srin 	/*
802d0d8f2a5Srin 	 * Packet Buffer Allocation (PBA)
803d0d8f2a5Srin 	 * Writing PBA sets the receive portion of the buffer;
804d0d8f2a5Srin 	 * the remainder is used for the transmit buffer.
805d0d8f2a5Srin 	 */
806fb38d839Srin 	const uint32_t pba = IGC_PBA_34K;
807d0d8f2a5Srin 
808d0d8f2a5Srin 	/*
809d0d8f2a5Srin 	 * These parameters control the automatic generation (Tx) and
810d0d8f2a5Srin 	 * response (Rx) to Ethernet PAUSE frames.
811d0d8f2a5Srin 	 * - High water mark should allow for at least two frames to be
812d0d8f2a5Srin 	 *   received after sending an XOFF.
813d0d8f2a5Srin 	 * - Low water mark works best when it is very near the high water mark.
814d0d8f2a5Srin 	 *   This allows the receiver to restart by sending XON when it has
815d0d8f2a5Srin 	 *   drained a bit. The code below places the low water mark just
816d0d8f2a5Srin 	 *   16 bytes under the high water mark, so XON is sent as soon as
817d0d8f2a5Srin 	 *   the buffer drains below that point.
820d0d8f2a5Srin 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
821d0d8f2a5Srin 	 */
822fb38d839Srin 	const uint16_t rx_buffer_size = (pba & 0xffff) << 10;
823fb38d839Srin 
824d0d8f2a5Srin 	hw->fc.high_water = rx_buffer_size -
825d0d8f2a5Srin 	    roundup2(sc->hw.mac.max_frame_size, 1024);
826d0d8f2a5Srin 	/* 16-byte granularity */
827d0d8f2a5Srin 	hw->fc.low_water = hw->fc.high_water - 16;
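	/*
	 * Worked example, assuming IGC_PBA_34K is 0x22 (34KB):
	 * rx_buffer_size = 34 << 10 = 34816 bytes; with the 9234-byte
	 * max frame size rounded up to 10240, high_water = 24576 and
	 * low_water = 24560.
	 */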
828d0d8f2a5Srin 
829d0d8f2a5Srin 	if (sc->fc) /* locally set flow control value? */
830d0d8f2a5Srin 		hw->fc.requested_mode = sc->fc;
831d0d8f2a5Srin 	else
832d0d8f2a5Srin 		hw->fc.requested_mode = igc_fc_full;
833d0d8f2a5Srin 
834d0d8f2a5Srin 	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
835d0d8f2a5Srin 
836d0d8f2a5Srin 	hw->fc.send_xon = true;
837d0d8f2a5Srin 
838d0d8f2a5Srin 	/* Issue a global reset */
839d0d8f2a5Srin 	igc_reset_hw(hw);
840d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_WUC, 0);
841d0d8f2a5Srin 
842d0d8f2a5Srin 	/* and a re-init */
843d0d8f2a5Srin 	if (igc_init_hw(hw) < 0) {
844fb38d839Srin 		aprint_error_dev(sc->sc_dev, "unable to reset hardware\n");
845d0d8f2a5Srin 		return;
846d0d8f2a5Srin 	}
847d0d8f2a5Srin 
848d0d8f2a5Srin 	/* Setup DMA Coalescing */
849d0d8f2a5Srin 	igc_init_dmac(sc, pba);
850d0d8f2a5Srin 
851d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
852d0d8f2a5Srin 	igc_get_phy_info(hw);
853d0d8f2a5Srin 	igc_check_for_link(hw);
854d0d8f2a5Srin }
855d0d8f2a5Srin 
856d0d8f2a5Srin /*********************************************************************
857d0d8f2a5Srin  *
858d0d8f2a5Srin  *  Initialize the DMA Coalescing feature
859d0d8f2a5Srin  *
860d0d8f2a5Srin  **********************************************************************/
861fb38d839Srin static void
862d0d8f2a5Srin igc_init_dmac(struct igc_softc *sc, uint32_t pba)
863d0d8f2a5Srin {
864d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
865fb38d839Srin 	const uint16_t max_frame_size = sc->hw.mac.max_frame_size;
866fb38d839Srin 	uint32_t reg, status;
867d0d8f2a5Srin 
868d0d8f2a5Srin 	if (sc->dmac == 0) { /* Disabling it */
869fb38d839Srin 		reg = ~IGC_DMACR_DMAC_EN;	/* XXXRO */
870d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_DMACR, reg);
871fb38d839Srin 		DPRINTF(MISC, "DMA coalescing disabled\n");
872d0d8f2a5Srin 		return;
873fb38d839Srin 	} else {
874fb38d839Srin 		device_printf(sc->sc_dev, "DMA coalescing enabled\n");
875fb38d839Srin 	}
876d0d8f2a5Srin 
877d0d8f2a5Srin 	/* Set starting threshold */
878d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);
879d0d8f2a5Srin 
880fb38d839Srin 	uint16_t hwm = 64 * pba - max_frame_size / 16;
881d0d8f2a5Srin 	if (hwm < 64 * (pba - 6))
882d0d8f2a5Srin 		hwm = 64 * (pba - 6);
883d0d8f2a5Srin 	reg = IGC_READ_REG(hw, IGC_FCRTC);
884d0d8f2a5Srin 	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
885fb38d839Srin 	reg |= (hwm << IGC_FCRTC_RTH_COAL_SHIFT) & IGC_FCRTC_RTH_COAL_MASK;
886d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_FCRTC, reg);
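	/*
	 * E.g. with the 34KB PBA assumed above (pba = 0x22):
	 * 64 * 34 - 9234 / 16 = 1599, which is under the 64 * (34 - 6) =
	 * 1792 floor, so hwm becomes 1792.
	 */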
887d0d8f2a5Srin 
888fb38d839Srin 	uint32_t dmac = pba - max_frame_size / 512;
889d0d8f2a5Srin 	if (dmac < pba - 10)
890d0d8f2a5Srin 		dmac = pba - 10;
891d0d8f2a5Srin 	reg = IGC_READ_REG(hw, IGC_DMACR);
892d0d8f2a5Srin 	reg &= ~IGC_DMACR_DMACTHR_MASK;
893fb38d839Srin 	reg |= (dmac << IGC_DMACR_DMACTHR_SHIFT) & IGC_DMACR_DMACTHR_MASK;
894d0d8f2a5Srin 
895d0d8f2a5Srin 	/* Transition to L0s or L1 if available. */
896fb38d839Srin 	reg |= IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK;
897d0d8f2a5Srin 
898d0d8f2a5Srin 	/* Check whether the link is a 2.5Gb backplane connection
899d0d8f2a5Srin 	 * before configuring the watchdog timer: the timer holds msec
900d0d8f2a5Srin 	 * values expressed in 12.8usec intervals for a 2.5Gb
901d0d8f2a5Srin 	 * connection, and in 32usec intervals otherwise.
903d0d8f2a5Srin 	 */
904d0d8f2a5Srin 	status = IGC_READ_REG(hw, IGC_STATUS);
905d0d8f2a5Srin 	if ((status & IGC_STATUS_2P5_SKU) &&
906fb38d839Srin 	    !(status & IGC_STATUS_2P5_SKU_OVER))
907fb38d839Srin 		reg |= (sc->dmac * 5) >> 6;
908d0d8f2a5Srin 	else
909fb38d839Srin 		reg |= sc->dmac >> 5;
910d0d8f2a5Srin 
911d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_DMACR, reg);
912d0d8f2a5Srin 
913d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);
914d0d8f2a5Srin 
915d0d8f2a5Srin 	/* Set the interval before transition */
916d0d8f2a5Srin 	reg = IGC_READ_REG(hw, IGC_DMCTLX);
917d0d8f2a5Srin 	reg |= IGC_DMCTLX_DCFLUSH_DIS;
918d0d8f2a5Srin 
919d0d8f2a5Srin 	/*
920fb38d839Srin 	 * On a 2.5Gb connection the TTLX unit is 0.4 usec, so the
921fb38d839Srin 	 * 4 usec delay is encoded as 4 / 0.4 = 10 = 0xA.
922d0d8f2a5Srin 	 */
923d0d8f2a5Srin 	status = IGC_READ_REG(hw, IGC_STATUS);
924d0d8f2a5Srin 	if ((status & IGC_STATUS_2P5_SKU) &&
925fb38d839Srin 	    !(status & IGC_STATUS_2P5_SKU_OVER))
926d0d8f2a5Srin 		reg |= 0xA;
927d0d8f2a5Srin 	else
928d0d8f2a5Srin 		reg |= 0x4;
929d0d8f2a5Srin 
930d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);
931d0d8f2a5Srin 
932d0d8f2a5Srin 	/* free space in tx packet buffer to wake from DMA coal */
933fb38d839Srin 	IGC_WRITE_REG(hw, IGC_DMCTXTH,
934fb38d839Srin 	    (IGC_TXPBSIZE - (2 * max_frame_size)) >> 6);
935d0d8f2a5Srin 
936d0d8f2a5Srin 	/* make low power state decision controlled by DMA coal */
937d0d8f2a5Srin 	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
938d0d8f2a5Srin 	reg &= ~IGC_PCIEMISC_LX_DECISION;
939d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
940d0d8f2a5Srin }
941d0d8f2a5Srin 
942fb38d839Srin static int
943fb38d839Srin igc_setup_interrupts(struct igc_softc *sc)
944d0d8f2a5Srin {
945fb38d839Srin 	int error;
946d0d8f2a5Srin 
947fb38d839Srin 	switch (sc->sc_intr_type) {
948fb38d839Srin 	case PCI_INTR_TYPE_MSIX:
949fb38d839Srin 		error = igc_setup_msix(sc);
950fb38d839Srin 		break;
951fb38d839Srin 	case PCI_INTR_TYPE_MSI:
952fb38d839Srin 		error = igc_setup_msi(sc);
953fb38d839Srin 		break;
954fb38d839Srin 	case PCI_INTR_TYPE_INTX:
955fb38d839Srin 		error = igc_setup_intx(sc);
956fb38d839Srin 		break;
957fb38d839Srin 	default:
958fb38d839Srin 		panic("%s: invalid interrupt type: %d",
959fb38d839Srin 		    device_xname(sc->sc_dev), sc->sc_intr_type);
960d0d8f2a5Srin 	}
961d0d8f2a5Srin 
962d0d8f2a5Srin 	return error;
963d0d8f2a5Srin }
964d0d8f2a5Srin 
965fb38d839Srin static void
966fb38d839Srin igc_attach_counters(struct igc_softc *sc)
967d0d8f2a5Srin {
968fb38d839Srin #ifdef IGC_EVENT_COUNTERS
969d0d8f2a5Srin 
970fb38d839Srin 	/* Global counters */
971fb38d839Srin 	sc->sc_global_evcnts = kmem_zalloc(
972fb38d839Srin 	    IGC_GLOBAL_COUNTERS * sizeof(sc->sc_global_evcnts[0]), KM_SLEEP);
973d0d8f2a5Srin 
974fb38d839Srin 	for (int cnt = 0; cnt < IGC_GLOBAL_COUNTERS; cnt++) {
975fb38d839Srin 		evcnt_attach_dynamic(&sc->sc_global_evcnts[cnt],
976fb38d839Srin 		    igc_global_counters[cnt].type, NULL,
977fb38d839Srin 		    device_xname(sc->sc_dev), igc_global_counters[cnt].name);
978d0d8f2a5Srin 	}
979d0d8f2a5Srin 
980fb38d839Srin 	/* Driver counters */
981fb38d839Srin 	sc->sc_driver_evcnts = kmem_zalloc(
982fb38d839Srin 	    IGC_DRIVER_COUNTERS * sizeof(sc->sc_driver_evcnts[0]), KM_SLEEP);
983fb38d839Srin 
984fb38d839Srin 	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++) {
985fb38d839Srin 		evcnt_attach_dynamic(&sc->sc_driver_evcnts[cnt],
986fb38d839Srin 		    igc_driver_counters[cnt].type, NULL,
987fb38d839Srin 		    device_xname(sc->sc_dev), igc_driver_counters[cnt].name);
988fb38d839Srin 	}
989fb38d839Srin 
990fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
991fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
992fb38d839Srin 
993fb38d839Srin 		q->igcq_driver_counters = kmem_zalloc(
994fb38d839Srin 		    IGC_DRIVER_COUNTERS * sizeof(q->igcq_driver_counters[0]),
995fb38d839Srin 		    KM_SLEEP);
996fb38d839Srin 	}
997fb38d839Srin 
998fb38d839Srin 	/* Queue counters */
999fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1000fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1001fb38d839Srin 
1002fb38d839Srin 		snprintf(q->igcq_queue_evname, sizeof(q->igcq_queue_evname),
1003fb38d839Srin 		    "%s q%d", device_xname(sc->sc_dev), iq);
1004fb38d839Srin 
1005fb38d839Srin 		q->igcq_queue_evcnts = kmem_zalloc(
1006fb38d839Srin 		    IGC_QUEUE_COUNTERS * sizeof(q->igcq_queue_evcnts[0]),
1007fb38d839Srin 		    KM_SLEEP);
1008fb38d839Srin 
1009fb38d839Srin 		for (int cnt = 0; cnt < IGC_QUEUE_COUNTERS; cnt++) {
1010fb38d839Srin 			evcnt_attach_dynamic(&q->igcq_queue_evcnts[cnt],
1011fb38d839Srin 			    igc_queue_counters[cnt].type, NULL,
1012fb38d839Srin 			    q->igcq_queue_evname, igc_queue_counters[cnt].name);
1013fb38d839Srin 		}
1014fb38d839Srin 	}
1015fb38d839Srin 
1016fb38d839Srin 	/* MAC counters */
1017fb38d839Srin 	snprintf(sc->sc_mac_evname, sizeof(sc->sc_mac_evname),
1018fb38d839Srin 	    "%s Mac Statistics", device_xname(sc->sc_dev));
1019fb38d839Srin 
1020fb38d839Srin 	sc->sc_mac_evcnts = kmem_zalloc(
1021fb38d839Srin 	    IGC_MAC_COUNTERS * sizeof(sc->sc_mac_evcnts[0]), KM_SLEEP);
1022fb38d839Srin 
1023fb38d839Srin 	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++) {
1024fb38d839Srin 		evcnt_attach_dynamic(&sc->sc_mac_evcnts[cnt], EVCNT_TYPE_MISC,
1025fb38d839Srin 		    NULL, sc->sc_mac_evname, igc_mac_counters[cnt].name);
1026fb38d839Srin 	}
1027fb38d839Srin #endif
1028fb38d839Srin }
1029fb38d839Srin 
1030fb38d839Srin static void
1031fb38d839Srin igc_detach_counters(struct igc_softc *sc)
1032fb38d839Srin {
1033fb38d839Srin #ifdef IGC_EVENT_COUNTERS
1034fb38d839Srin 
1035fb38d839Srin 	/* Global counters */
1036fb38d839Srin 	for (int cnt = 0; cnt < IGC_GLOBAL_COUNTERS; cnt++)
1037fb38d839Srin 		evcnt_detach(&sc->sc_global_evcnts[cnt]);
1038fb38d839Srin 
1039fb38d839Srin 	kmem_free(sc->sc_global_evcnts,
1040fb38d839Srin 	    IGC_GLOBAL_COUNTERS * sizeof(sc->sc_global_evcnts));
1041fb38d839Srin 
1042fb38d839Srin 	/* Driver counters */
1043fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1044fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1045fb38d839Srin 
1046fb38d839Srin 		kmem_free(q->igcq_driver_counters,
1047fb38d839Srin 		    IGC_DRIVER_COUNTERS * sizeof(q->igcq_driver_counters[0]));
1048fb38d839Srin 	}
1049fb38d839Srin 
1050fb38d839Srin 	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++)
1051fb38d839Srin 		evcnt_detach(&sc->sc_driver_evcnts[cnt]);
1052fb38d839Srin 
1053fb38d839Srin 	kmem_free(sc->sc_driver_evcnts,
1054fb38d839Srin 	    IGC_DRIVER_COUNTERS * sizeof(sc->sc_driver_evcnts));
1055fb38d839Srin 
1056fb38d839Srin 	/* Queue counters */
1057fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1058fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1059fb38d839Srin 
1060fb38d839Srin 		for (int cnt = 0; cnt < IGC_QUEUE_COUNTERS; cnt++)
1061fb38d839Srin 			evcnt_detach(&q->igcq_queue_evcnts[cnt]);
1062fb38d839Srin 
1063fb38d839Srin 		kmem_free(q->igcq_queue_evcnts,
1064fb38d839Srin 		    IGC_QUEUE_COUNTERS * sizeof(q->igcq_queue_evcnts[0]));
1065fb38d839Srin 	}
1066fb38d839Srin 
1067fb38d839Srin 	/* MAC statistics */
1068fb38d839Srin 	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++)
1069fb38d839Srin 		evcnt_detach(&sc->sc_mac_evcnts[cnt]);
1070fb38d839Srin 
1071fb38d839Srin 	kmem_free(sc->sc_mac_evcnts,
1072fb38d839Srin 	    IGC_MAC_COUNTERS * sizeof(sc->sc_mac_evcnts[0]));
1073fb38d839Srin #endif
1074fb38d839Srin }
1075fb38d839Srin 
1076fb38d839Srin /*
1077fb38d839Srin  * XXX
1078fb38d839Srin  * FreeBSD uses 4-byte-wise read for 64-bit counters, while Linux just
1079fb38d839Srin  * FreeBSD reads 64-bit counters as two 32-bit halves, while Linux just
1080fb38d839Srin  * drops the high words.
1081fb38d839Srin static inline uint64_t __unused
1082fb38d839Srin igc_read_mac_counter(struct igc_hw *hw, bus_size_t reg, bool is64)
1083fb38d839Srin {
1084fb38d839Srin 	uint64_t val;
1085fb38d839Srin 
1086fb38d839Srin 	val = IGC_READ_REG(hw, reg);
1087fb38d839Srin 	if (is64)
1088fb38d839Srin 		val += ((uint64_t)IGC_READ_REG(hw, reg + 4)) << 32;
1089fb38d839Srin 	return val;
1090fb38d839Srin }
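/*
 * E.g. igc_read_mac_counter(hw, IGC_GORCL, true) would combine GORCL
 * and the adjacent GORCH into one 64-bit octet count (assuming the
 * usual GORCL/GORCH register pairing).
 */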
1091fb38d839Srin 
1092fb38d839Srin static void
1093fb38d839Srin igc_update_counters(struct igc_softc *sc)
1094fb38d839Srin {
1095fb38d839Srin #ifdef IGC_EVENT_COUNTERS
1096fb38d839Srin 
1097fb38d839Srin 	/* Global counters: nop */
1098fb38d839Srin 
1099fb38d839Srin 	/* Driver counters */
1100fb38d839Srin 	uint64_t sum[IGC_DRIVER_COUNTERS];
1101fb38d839Srin 
1102fb38d839Srin 	memset(sum, 0, sizeof(sum));
1103fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1104fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1105fb38d839Srin 
1106fb38d839Srin 		for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++) {
1107fb38d839Srin 			sum[cnt] += IGC_QUEUE_DRIVER_COUNTER_VAL(q, cnt);
1108fb38d839Srin 			IGC_QUEUE_DRIVER_COUNTER_STORE(q, cnt, 0);
1109fb38d839Srin 		}
1110fb38d839Srin 	}
1111fb38d839Srin 
1112fb38d839Srin 	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++)
1113fb38d839Srin 		IGC_DRIVER_COUNTER_ADD(sc, cnt, sum[cnt]);
1114fb38d839Srin 
1115fb38d839Srin 	/* Queue counters: nop */
1116fb38d839Srin 
1117fb38d839Srin 	/* MAC statistics */
1118fb38d839Srin 	struct igc_hw *hw = &sc->hw;
1119d50b529bSmsaitoh 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1120d50b529bSmsaitoh 	uint64_t iqdrops = 0;
1121fb38d839Srin 
1122fb38d839Srin 	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++) {
1123d50b529bSmsaitoh 		uint64_t val;
1124d50b529bSmsaitoh 		bus_size_t regaddr = igc_mac_counters[cnt].reg;
1125d50b529bSmsaitoh 
1126d50b529bSmsaitoh 		val = igc_read_mac_counter(hw, regaddr,
1127d50b529bSmsaitoh 		    igc_mac_counters[cnt].is64);
1128d50b529bSmsaitoh 		IGC_MAC_COUNTER_ADD(sc, cnt, val);
1129d50b529bSmsaitoh 		/* XXX Count MPC to iqdrops. */
1130d50b529bSmsaitoh 		if (regaddr == IGC_MPC)
1131d50b529bSmsaitoh 			iqdrops += val;
1132fb38d839Srin 	}
1133d50b529bSmsaitoh 
1134d50b529bSmsaitoh 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1135d50b529bSmsaitoh 		uint32_t val;
1136d50b529bSmsaitoh 
1137d50b529bSmsaitoh 		/* XXX RQDPC should be visible via evcnt(9). */
1138d50b529bSmsaitoh 		val = IGC_READ_REG(hw, IGC_RQDPC(iq));
1139d50b529bSmsaitoh 
1140d50b529bSmsaitoh 		/* RQDPC is not cleared on read. */
1141d50b529bSmsaitoh 		if (val != 0)
1142d50b529bSmsaitoh 			IGC_WRITE_REG(hw, IGC_RQDPC(iq), 0);
1143d50b529bSmsaitoh 		iqdrops += val;
1144d50b529bSmsaitoh 	}
1145d50b529bSmsaitoh 
1146d50b529bSmsaitoh 	if (iqdrops != 0)
1147d50b529bSmsaitoh 		if_statadd(ifp, if_iqdrops, iqdrops);
1148fb38d839Srin #endif
1149fb38d839Srin }
1150fb38d839Srin 
1151fb38d839Srin static void
1152fb38d839Srin igc_clear_counters(struct igc_softc *sc)
1153fb38d839Srin {
1154fb38d839Srin #ifdef IGC_EVENT_COUNTERS
1155fb38d839Srin 
1156fb38d839Srin 	/* Global counters */
1157fb38d839Srin 	for (int cnt = 0; cnt < IGC_GLOBAL_COUNTERS; cnt++)
1158fb38d839Srin 		IGC_GLOBAL_COUNTER_STORE(sc, cnt, 0);
1159fb38d839Srin 
1160fb38d839Srin 	/* Driver counters */
1161fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1162fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1163fb38d839Srin 
1164fb38d839Srin 		for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++)
1165fb38d839Srin 			IGC_QUEUE_DRIVER_COUNTER_STORE(q, cnt, 0);
1166fb38d839Srin 	}
1167fb38d839Srin 
1168fb38d839Srin 	for (int cnt = 0; cnt < IGC_DRIVER_COUNTERS; cnt++)
1169fb38d839Srin 		IGC_DRIVER_COUNTER_STORE(sc, cnt, 0);
1170fb38d839Srin 
1171fb38d839Srin 	/* Queue counters */
1172fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1173fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1174fb38d839Srin 
1175fb38d839Srin 		for (int cnt = 0; cnt < IGC_QUEUE_COUNTERS; cnt++)
1176fb38d839Srin 			IGC_QUEUE_COUNTER_STORE(q, cnt, 0);
1177fb38d839Srin 	}
1178fb38d839Srin 
1179fb38d839Srin 	/* MAC statistics */
1180fb38d839Srin 	struct igc_hw *hw = &sc->hw;
1181fb38d839Srin 
1182fb38d839Srin 	for (int cnt = 0; cnt < IGC_MAC_COUNTERS; cnt++) {
1183fb38d839Srin 		(void)igc_read_mac_counter(hw, igc_mac_counters[cnt].reg,
1184fb38d839Srin 		    igc_mac_counters[cnt].is64);
1185fb38d839Srin 		IGC_MAC_COUNTER_STORE(sc, cnt, 0);
1186fb38d839Srin 	}
1187fb38d839Srin #endif
1188fb38d839Srin }
1189fb38d839Srin 
1190fb38d839Srin static int
1191fb38d839Srin igc_setup_msix(struct igc_softc *sc)
1192fb38d839Srin {
1193fb38d839Srin 	pci_chipset_tag_t pc = sc->osdep.os_pa.pa_pc;
1194fb38d839Srin 	device_t dev = sc->sc_dev;
1195fb38d839Srin 	pci_intr_handle_t *intrs;
1196fb38d839Srin 	void **ihs;
1197fb38d839Srin 	const char *intrstr;
1198fb38d839Srin 	char intrbuf[PCI_INTRSTR_LEN];
1199fb38d839Srin 	char xnamebuf[MAX(32, MAXCOMLEN)];
1200fb38d839Srin 	int iq, error;
1201fb38d839Srin 
1202fb38d839Srin 	for (iq = 0, intrs = sc->sc_intrs, ihs = sc->sc_ihs;
1203fb38d839Srin 	    iq < sc->sc_nqueues; iq++, intrs++, ihs++) {
1204fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
1205fb38d839Srin 
1206fb38d839Srin 		snprintf(xnamebuf, sizeof(xnamebuf), "%s: txrx %d",
1207fb38d839Srin 		    device_xname(dev), iq);
1208fb38d839Srin 
1209fb38d839Srin 		intrstr = pci_intr_string(pc, *intrs, intrbuf, sizeof(intrbuf));
1210fb38d839Srin 
1211fb38d839Srin 		pci_intr_setattr(pc, intrs, PCI_INTR_MPSAFE, true);
1212fb38d839Srin 		*ihs = pci_intr_establish_xname(pc, *intrs, IPL_NET,
1213fb38d839Srin 		    igc_intr_queue, q, xnamebuf);
1214fb38d839Srin 		if (*ihs == NULL) {
1215fb38d839Srin 			aprint_error_dev(dev,
1216fb38d839Srin 			    "unable to establish txrx interrupt at %s\n",
1217fb38d839Srin 			    intrstr);
1218fb38d839Srin 			return ENOBUFS;
1219fb38d839Srin 		}
1220fb38d839Srin 		aprint_normal_dev(dev, "txrx interrupting at %s\n", intrstr);
1221fb38d839Srin 
1222fb38d839Srin 		kcpuset_t *affinity;
1223fb38d839Srin 		kcpuset_create(&affinity, true);
1224fb38d839Srin 		kcpuset_set(affinity, iq % ncpu);
1225fb38d839Srin 		error = interrupt_distribute(*ihs, affinity, NULL);
1226fb38d839Srin 		if (error) {
1227fb38d839Srin 			aprint_normal_dev(dev,
1228fb38d839Srin 		    "%s: unable to change affinity, using default CPU\n",
1229fb38d839Srin 			    intrstr);
1230fb38d839Srin 		}
1231fb38d839Srin 		kcpuset_destroy(affinity);
1232fb38d839Srin 
1233fb38d839Srin 		q->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1234fb38d839Srin 		    igc_handle_queue, q);
1235fb38d839Srin 		if (q->igcq_si == NULL) {
1236fb38d839Srin 			aprint_error_dev(dev,
1237fb38d839Srin 			    "%s: unable to establish softint\n", intrstr);
1238fb38d839Srin 			return ENOBUFS;
1239fb38d839Srin 		}
1240fb38d839Srin 
1241fb38d839Srin 		q->msix = iq;
1242fb38d839Srin 		q->eims = 1 << iq;
1243fb38d839Srin 	}
1244fb38d839Srin 
1245fb38d839Srin 	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(dev));
1246fb38d839Srin 	error = workqueue_create(&sc->sc_queue_wq, xnamebuf,
1247fb38d839Srin 	    igc_handle_queue_work, sc, IGC_WORKQUEUE_PRI, IPL_NET,
1248fb38d839Srin 	    WQ_PERCPU | WQ_MPSAFE);
1249fb38d839Srin 	if (error) {
1250fb38d839Srin 		aprint_error_dev(dev, "workqueue_create failed\n");
1251fb38d839Srin 		return ENOBUFS;
1252fb38d839Srin 	}
1253fb38d839Srin 	sc->sc_txrx_workqueue = false;
1254fb38d839Srin 
1255fb38d839Srin 	intrstr = pci_intr_string(pc, *intrs, intrbuf, sizeof(intrbuf));
1256fb38d839Srin 	snprintf(xnamebuf, sizeof(xnamebuf), "%s: link", device_xname(dev));
1257fb38d839Srin 	pci_intr_setattr(pc, intrs, PCI_INTR_MPSAFE, true);
1258fb38d839Srin 	*ihs = pci_intr_establish_xname(pc, *intrs, IPL_NET,
1259fb38d839Srin 	    igc_intr_link, sc, xnamebuf);
1260fb38d839Srin 	if (*ihs == NULL) {
1261fb38d839Srin 		aprint_error_dev(dev,
1262fb38d839Srin 		    "unable to establish link interrupt at %s\n", intrstr);
1263fb38d839Srin 		return ENOBUFS;
1264fb38d839Srin 	}
1265fb38d839Srin 	aprint_normal_dev(dev, "link interrupting at %s\n", intrstr);
1266fb38d839Srin 	/* used later in igc_configure_queues() */
1267fb38d839Srin 	sc->linkvec = iq;
1268fb38d839Srin 
1269fb38d839Srin 	return 0;
1270fb38d839Srin }
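/*
 * With the MSI-X setup above, vectors 0 .. sc_nqueues - 1 service the
 * per-queue txrx interrupts and the final vector (recorded in
 * sc->linkvec) services link-state changes.
 */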
1271fb38d839Srin 
1272fb38d839Srin static int
1273fb38d839Srin igc_setup_msi(struct igc_softc *sc)
1274fb38d839Srin {
1275fb38d839Srin 	pci_chipset_tag_t pc = sc->osdep.os_pa.pa_pc;
1276fb38d839Srin 	device_t dev = sc->sc_dev;
1277fb38d839Srin 	pci_intr_handle_t *intr = sc->sc_intrs;
1278fb38d839Srin 	void **ihs = sc->sc_ihs;
1279fb38d839Srin 	const char *intrstr;
1280fb38d839Srin 	char intrbuf[PCI_INTRSTR_LEN];
1281fb38d839Srin 	char xnamebuf[MAX(32, MAXCOMLEN)];
1282fb38d839Srin 	int error;
1283fb38d839Srin 
1284fb38d839Srin 	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
1285fb38d839Srin 
1286fb38d839Srin 	snprintf(xnamebuf, sizeof(xnamebuf), "%s: msi", device_xname(dev));
1287fb38d839Srin 	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
1288fb38d839Srin 	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
1289fb38d839Srin 	    igc_intr, sc, xnamebuf);
1290fb38d839Srin 	if (*ihs == NULL) {
1291fb38d839Srin 		aprint_error_dev(dev,
1292fb38d839Srin 		    "unable to establish interrupt at %s\n", intrstr);
1293fb38d839Srin 		return ENOBUFS;
1294fb38d839Srin 	}
1295fb38d839Srin 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
1296fb38d839Srin 
1297fb38d839Srin 	struct igc_queue *iq = sc->queues;
1298fb38d839Srin 	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1299fb38d839Srin 	    igc_handle_queue, iq);
1300fb38d839Srin 	if (iq->igcq_si == NULL) {
1301fb38d839Srin 		aprint_error_dev(dev,
1302fb38d839Srin 		    "%s: unable to establish softint\n", intrstr);
1303fb38d839Srin 		return ENOBUFS;
1304fb38d839Srin 	}
1305fb38d839Srin 
1306fb38d839Srin 	snprintf(xnamebuf, MAXCOMLEN, "%s_tx_rx", device_xname(dev));
1307fb38d839Srin 	error = workqueue_create(&sc->sc_queue_wq, xnamebuf,
1308fb38d839Srin 	    igc_handle_queue_work, sc, IGC_WORKQUEUE_PRI, IPL_NET,
1309fb38d839Srin 	    WQ_PERCPU | WQ_MPSAFE);
1310fb38d839Srin 	if (error) {
1311fb38d839Srin 		aprint_error_dev(dev, "workqueue_create failed\n");
1312fb38d839Srin 		return ENOBUFS;
1313fb38d839Srin 	}
1314fb38d839Srin 	sc->sc_txrx_workqueue = false;
1315fb38d839Srin 
1316fb38d839Srin 	sc->queues[0].msix = 0;
1317fb38d839Srin 	sc->linkvec = 0;
1318fb38d839Srin 
1319fb38d839Srin 	return 0;
1320fb38d839Srin }
1321fb38d839Srin 
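/*
 * Legacy INTx fallback: as in the MSI path, igc_intr() handles both
 * queue and link events on the one line, but the TX/RX workqueue is
 * not created here (see the open question below).
 */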
1322fb38d839Srin static int
1323fb38d839Srin igc_setup_intx(struct igc_softc *sc)
1324fb38d839Srin {
1325fb38d839Srin 	pci_chipset_tag_t pc = sc->osdep.os_pa.pa_pc;
1326fb38d839Srin 	device_t dev = sc->sc_dev;
1327fb38d839Srin 	pci_intr_handle_t *intr = sc->sc_intrs;
1328fb38d839Srin 	void **ihs = sc->sc_ihs;
1329fb38d839Srin 	const char *intrstr;
1330fb38d839Srin 	char intrbuf[PCI_INTRSTR_LEN];
1331fb38d839Srin 	char xnamebuf[32];
1332fb38d839Srin 
1333fb38d839Srin 	intrstr = pci_intr_string(pc, *intr, intrbuf, sizeof(intrbuf));
1334fb38d839Srin 
1335fb38d839Srin 	snprintf(xnamebuf, sizeof(xnamebuf), "%s: intx", device_xname(dev));
1336fb38d839Srin 	pci_intr_setattr(pc, intr, PCI_INTR_MPSAFE, true);
1337fb38d839Srin 	*ihs = pci_intr_establish_xname(pc, *intr, IPL_NET,
1338fb38d839Srin 	    igc_intr, sc, xnamebuf);
1339fb38d839Srin 	if (*ihs == NULL) {
1340fb38d839Srin 		aprint_error_dev(dev,
1341fb38d839Srin 		    "unable to establish interrupt at %s\n", intrstr);
1342fb38d839Srin 		return ENOBUFS;
1343fb38d839Srin 	}
1344fb38d839Srin 	aprint_normal_dev(dev, "interrupting at %s\n", intrstr);
1345fb38d839Srin 
1346fb38d839Srin 	struct igc_queue *iq = sc->queues;
1347fb38d839Srin 	iq->igcq_si = softint_establish(SOFTINT_NET | SOFTINT_MPSAFE,
1348fb38d839Srin 	    igc_handle_queue, iq);
1349fb38d839Srin 	if (iq->igcq_si == NULL) {
1350fb38d839Srin 		aprint_error_dev(dev,
1351fb38d839Srin 		    "%s: unable to establish softint\n", intrstr);
1352fb38d839Srin 		return ENOBUFS;
1353fb38d839Srin 	}
1354fb38d839Srin 
1355fb38d839Srin 	/* create workqueue? */
1356fb38d839Srin 	sc->sc_txrx_workqueue = false;
1357fb38d839Srin 
1358fb38d839Srin 	sc->queues[0].msix = 0;
1359fb38d839Srin 	sc->linkvec = 0;
1360fb38d839Srin 
1361fb38d839Srin 	return 0;
1362fb38d839Srin }
1363fb38d839Srin 
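/*
 * Allocate, map, and load a single physically contiguous DMA segment
 * of `size' bytes for a descriptor ring, unwinding every step on
 * failure.
 */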
1364fb38d839Srin static int
1365d0d8f2a5Srin igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma)
1366d0d8f2a5Srin {
1367d0d8f2a5Srin 	struct igc_osdep *os = &sc->osdep;
1368d0d8f2a5Srin 
1369fb38d839Srin 	dma->dma_tag = os->os_dmat;
1370d0d8f2a5Srin 
1371fb38d839Srin 	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0,
1372fb38d839Srin 	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &dma->dma_map))
1373d0d8f2a5Srin 		return 1;
1374d0d8f2a5Srin 	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1375fb38d839Srin 	    1, &dma->dma_nseg, BUS_DMA_WAITOK))
1376d0d8f2a5Srin 		goto destroy;
1377fb38d839Srin 	/*
1378fb38d839Srin 	 * XXXRO
1379fb38d839Srin 	 *
1380fb38d839Srin 	 * Coherent mapping for descriptors is required for now.
1381fb38d839Srin 	 *
1382fb38d839Srin 	 * Both TX and RX descriptors are 16 bytes long, which is shorter
1383fb38d839Srin 	 * than a dcache line on modern CPUs, so a sync for one descriptor
1384fb38d839Srin 	 * may overwrite data DMA'd for other descriptors in the same line.
1385fb38d839Srin 	 *
1386fb38d839Srin 	 * Can't we avoid this by simply using cache-line-aligned descriptors?
1387fb38d839Srin 	 */
1388d0d8f2a5Srin 	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1389fb38d839Srin 	    &dma->dma_vaddr, BUS_DMA_WAITOK | BUS_DMA_COHERENT /* XXXRO */))
1390d0d8f2a5Srin 		goto free;
1391d0d8f2a5Srin 	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
1392fb38d839Srin 	    NULL, BUS_DMA_WAITOK))
1393d0d8f2a5Srin 		goto unmap;
1394d0d8f2a5Srin 
1395d0d8f2a5Srin 	dma->dma_size = size;
1396d0d8f2a5Srin 
1397d0d8f2a5Srin 	return 0;
1398d0d8f2a5Srin  unmap:
1399d0d8f2a5Srin 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1400d0d8f2a5Srin  free:
1401d0d8f2a5Srin 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1402d0d8f2a5Srin  destroy:
1403d0d8f2a5Srin 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1404d0d8f2a5Srin 	dma->dma_map = NULL;
1405d0d8f2a5Srin 	dma->dma_tag = NULL;
1406d0d8f2a5Srin 	return 1;
1407d0d8f2a5Srin }
1408d0d8f2a5Srin 
1409fb38d839Srin static void
1410d0d8f2a5Srin igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma)
1411d0d8f2a5Srin {
1412fb38d839Srin 
1413d0d8f2a5Srin 	if (dma->dma_tag == NULL)
1414d0d8f2a5Srin 		return;
1415d0d8f2a5Srin 
1416d0d8f2a5Srin 	if (dma->dma_map != NULL) {
1417d0d8f2a5Srin 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1418d0d8f2a5Srin 		    dma->dma_map->dm_mapsize,
1419d0d8f2a5Srin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1420d0d8f2a5Srin 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1421d0d8f2a5Srin 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1422d0d8f2a5Srin 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1423d0d8f2a5Srin 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1424d0d8f2a5Srin 		dma->dma_map = NULL;
1425d0d8f2a5Srin 	}
1426d0d8f2a5Srin }
1427d0d8f2a5Srin 
1428d0d8f2a5Srin /*********************************************************************
1429d0d8f2a5Srin  *
1430d0d8f2a5Srin  *  Setup networking device structure and register an interface.
1431d0d8f2a5Srin  *
1432d0d8f2a5Srin  **********************************************************************/
1433fb38d839Srin static void
1434d0d8f2a5Srin igc_setup_interface(struct igc_softc *sc)
1435d0d8f2a5Srin {
1436fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1437d0d8f2a5Srin 
1438fb38d839Srin 	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), sizeof(ifp->if_xname));
1439d0d8f2a5Srin 	ifp->if_softc = sc;
1440d0d8f2a5Srin 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1441fb38d839Srin 	ifp->if_extflags = IFEF_MPSAFE;
1442d0d8f2a5Srin 	ifp->if_ioctl = igc_ioctl;
1443fb38d839Srin 	ifp->if_start = igc_start;
1444fb38d839Srin 	if (sc->sc_nqueues > 1)
1445fb38d839Srin 		ifp->if_transmit = igc_transmit;
1446d0d8f2a5Srin 	ifp->if_watchdog = igc_watchdog;
1447fb38d839Srin 	ifp->if_init = igc_init;
1448fb38d839Srin 	ifp->if_stop = igc_stop;
1449d0d8f2a5Srin 
1450fb38d839Srin #if 0 /* notyet */
1451fb38d839Srin 	ifp->if_capabilities = IFCAP_TSOv4 | IFCAP_TSOv6;
1452fb38d839Srin #endif
1453d0d8f2a5Srin 
1454fb38d839Srin 	ifp->if_capabilities |=
1455fb38d839Srin 	    IFCAP_CSUM_IPv4_Tx  | IFCAP_CSUM_IPv4_Rx  |
1456fb38d839Srin 	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
1457fb38d839Srin 	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx |
1458fb38d839Srin 	    IFCAP_CSUM_TCPv6_Tx | IFCAP_CSUM_TCPv6_Rx |
1459fb38d839Srin 	    IFCAP_CSUM_UDPv6_Tx | IFCAP_CSUM_UDPv6_Rx;
1460fb38d839Srin 
1461fb38d839Srin 	ifp->if_capenable = 0;
1462fb38d839Srin 
1463fb38d839Srin 	sc->sc_ec.ec_capabilities |=
1464fb38d839Srin 	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;
1465fb38d839Srin 
1466fb38d839Srin 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1467fb38d839Srin 	IFQ_SET_READY(&ifp->if_snd);
1468fb38d839Srin 
1469d0d8f2a5Srin #if NVLAN > 0
1470fb38d839Srin 	sc->sc_ec.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
1471d0d8f2a5Srin #endif
1472d0d8f2a5Srin 
1473fb38d839Srin 	mutex_init(&sc->sc_core_lock, MUTEX_DEFAULT, IPL_NET);
1474d0d8f2a5Srin 
1475d0d8f2a5Srin 	/* Initialize ifmedia structures. */
1476fb38d839Srin 	sc->sc_ec.ec_ifmedia = &sc->media;
1477fb38d839Srin 	ifmedia_init_with_lock(&sc->media, IFM_IMASK, igc_media_change,
1478fb38d839Srin 	    igc_media_status, &sc->sc_core_lock);
1479d0d8f2a5Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
1480d0d8f2a5Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
1481d0d8f2a5Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1482d0d8f2a5Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
1483d0d8f2a5Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
1484fb38d839Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T | IFM_FDX, 0, NULL);
1485d0d8f2a5Srin 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1486d0d8f2a5Srin 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1487d0d8f2a5Srin 
1488fb38d839Srin 	sc->sc_rx_intr_process_limit = IGC_RX_INTR_PROCESS_LIMIT_DEFAULT;
1489fb38d839Srin 	sc->sc_tx_intr_process_limit = IGC_TX_INTR_PROCESS_LIMIT_DEFAULT;
1490fb38d839Srin 	sc->sc_rx_process_limit = IGC_RX_PROCESS_LIMIT_DEFAULT;
1491fb38d839Srin 	sc->sc_tx_process_limit = IGC_TX_PROCESS_LIMIT_DEFAULT;
1492d0d8f2a5Srin 
1493fb38d839Srin 	if_initialize(ifp);
1494fb38d839Srin 	sc->sc_ipq = if_percpuq_create(ifp);
1495fb38d839Srin 	if_deferred_start_init(ifp, NULL);
1496fb38d839Srin 	ether_ifattach(ifp, sc->hw.mac.addr);
1497fb38d839Srin 	ether_set_ifflags_cb(&sc->sc_ec, igc_ifflags_cb);
1498fb38d839Srin 	if_register(ifp);
1499d0d8f2a5Srin }
1500d0d8f2a5Srin 
1501fb38d839Srin static int
1502fb38d839Srin igc_init(struct ifnet *ifp)
1503d0d8f2a5Srin {
1504fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
1505fb38d839Srin 	int error;
1506d0d8f2a5Srin 
1507fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
1508fb38d839Srin 	error = igc_init_locked(sc);
1509fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
1510d0d8f2a5Srin 
1511fb38d839Srin 	return error;
1512fb38d839Srin }
1513d0d8f2a5Srin 
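/*
 * Bring the interface up with sc_core_lock held: stop it if it is
 * already running, reprogram the MAC, refill the RX rings, set
 * IFF_RUNNING, and finally restart the tick callout and interrupts.
 */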
1514fb38d839Srin static int
1515fb38d839Srin igc_init_locked(struct igc_softc *sc)
1516fb38d839Srin {
1517fb38d839Srin 	struct ethercom *ec = &sc->sc_ec;
1518fb38d839Srin 	struct ifnet *ifp = &ec->ec_if;
1519fb38d839Srin 
1520fb38d839Srin 	DPRINTF(CFG, "called\n");
1521fb38d839Srin 
1522fb38d839Srin 	KASSERT(mutex_owned(&sc->sc_core_lock));
1523fb38d839Srin 
1524fb38d839Srin 	if (ISSET(ifp->if_flags, IFF_RUNNING))
1525fb38d839Srin 		igc_stop_locked(sc);
1526d0d8f2a5Srin 
1527d0d8f2a5Srin 	/* Put the address into the receive address array. */
1528d0d8f2a5Srin 	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);
1529d0d8f2a5Srin 
1530d0d8f2a5Srin 	/* Initialize the hardware. */
1531d0d8f2a5Srin 	igc_reset(sc);
1532d0d8f2a5Srin 	igc_update_link_status(sc);
1533d0d8f2a5Srin 
1534d0d8f2a5Srin 	/* Setup VLAN support, basic and offload if available. */
1535d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);
1536d0d8f2a5Srin 
1537d0d8f2a5Srin 	igc_initialize_transmit_unit(sc);
1538d0d8f2a5Srin 	igc_initialize_receive_unit(sc);
1539d0d8f2a5Srin 
1540fb38d839Srin 	if (ec->ec_capenable & ETHERCAP_VLAN_HWTAGGING) {
1541fb38d839Srin 		uint32_t ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
1542d0d8f2a5Srin 		ctrl |= IGC_CTRL_VME;
1543d0d8f2a5Srin 		IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
1544d0d8f2a5Srin 	}
1545d0d8f2a5Srin 
1546d0d8f2a5Srin 	/* Setup multicast table. */
1547fb38d839Srin 	igc_set_filter(sc);
1548d0d8f2a5Srin 
1549d0d8f2a5Srin 	igc_clear_hw_cntrs_base_generic(&sc->hw);
1550d0d8f2a5Srin 
1551fb38d839Srin 	if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX)
1552d0d8f2a5Srin 		igc_configure_queues(sc);
1553d0d8f2a5Srin 
1554d0d8f2a5Srin 	/* This clears any pending interrupts */
1555d0d8f2a5Srin 	IGC_READ_REG(&sc->hw, IGC_ICR);
1556d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);
1557d0d8f2a5Srin 
1558d0d8f2a5Srin 	/* The driver can now take control from firmware. */
1559d0d8f2a5Srin 	igc_get_hw_control(sc);
1560d0d8f2a5Srin 
1561d0d8f2a5Srin 	/* Set Energy Efficient Ethernet. */
1562d0d8f2a5Srin 	igc_set_eee_i225(&sc->hw, true, true, true);
1563d0d8f2a5Srin 
1564fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1565fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
1566fb38d839Srin 
1567fb38d839Srin 		mutex_enter(&rxr->rxr_lock);
1568d0d8f2a5Srin 		igc_rxfill(rxr);
1569fb38d839Srin 		mutex_exit(&rxr->rxr_lock);
1570d0d8f2a5Srin 	}
1571d0d8f2a5Srin 
1572fb38d839Srin 	sc->sc_core_stopping = false;
1573d0d8f2a5Srin 
1574fb38d839Srin 	ifp->if_flags |= IFF_RUNNING;
1575fb38d839Srin 
1576fb38d839Srin 	/* Save last flags for the callback */
1577fb38d839Srin 	sc->sc_if_flags = ifp->if_flags;
1578fb38d839Srin 
1579686619b1Srin 	callout_schedule(&sc->sc_tick_ch, hz);
1580686619b1Srin 
1581686619b1Srin 	igc_enable_intr(sc);
1582686619b1Srin 
1583fb38d839Srin 	return 0;
1584d0d8f2a5Srin }
1585d0d8f2a5Srin 
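/*
 * Load an mbuf chain into a TX DMA map. On EFBIG (too many segments),
 * retry once after compacting the chain with m_defrag(); any failure
 * is classified into a per-queue event counter.
 */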
1586d0d8f2a5Srin static inline int
1587fb38d839Srin igc_load_mbuf(struct igc_queue *q, bus_dma_tag_t dmat, bus_dmamap_t map,
1588fb38d839Srin     struct mbuf *m)
1589d0d8f2a5Srin {
1590d0d8f2a5Srin 	int error;
1591d0d8f2a5Srin 
1592d0d8f2a5Srin 	error = bus_dmamap_load_mbuf(dmat, map, m,
1593fb38d839Srin 	    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1594d0d8f2a5Srin 
1595fb38d839Srin 	if (__predict_false(error == EFBIG)) {
1596fb38d839Srin 		IGC_DRIVER_EVENT(q, txdma_efbig, 1);
1597fb38d839Srin 		m = m_defrag(m, M_NOWAIT);
1598fb38d839Srin 		if (__predict_false(m == NULL)) {
1599fb38d839Srin 			IGC_DRIVER_EVENT(q, txdma_defrag, 1);
1600fb38d839Srin 			return ENOBUFS;
1601fb38d839Srin 		}
1602fb38d839Srin 		error = bus_dmamap_load_mbuf(dmat, map, m,
1603fb38d839Srin 		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
1604d0d8f2a5Srin 	}
1605d0d8f2a5Srin 
1606fb38d839Srin 	switch (error) {
1607fb38d839Srin 	case 0:
1608fb38d839Srin 		break;
1609fb38d839Srin 	case ENOMEM:
1610fb38d839Srin 		IGC_DRIVER_EVENT(q, txdma_enomem, 1);
1611fb38d839Srin 		break;
1612fb38d839Srin 	case EINVAL:
1613fb38d839Srin 		IGC_DRIVER_EVENT(q, txdma_einval, 1);
1614fb38d839Srin 		break;
1615fb38d839Srin 	case EAGAIN:
1616fb38d839Srin 		IGC_DRIVER_EVENT(q, txdma_eagain, 1);
1617fb38d839Srin 		break;
1618fb38d839Srin 	default:
1619fb38d839Srin 		IGC_DRIVER_EVENT(q, txdma_other, 1);
1620fb38d839Srin 		break;
1621fb38d839Srin 	}
1622d0d8f2a5Srin 
1623fb38d839Srin 	return error;
1624fb38d839Srin }
1625fb38d839Srin 
1626fb38d839Srin #define IGC_TX_START	1
1627fb38d839Srin #define IGC_TX_TRANSMIT	2
1628fb38d839Srin 
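/*
 * if_start callback, used when a single queue is configured: drain
 * the shared if_snd queue onto TX ring 0.
 */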
1629fb38d839Srin static void
1630fb38d839Srin igc_start(struct ifnet *ifp)
1631fb38d839Srin {
1632fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
1633fb38d839Srin 
1634fb38d839Srin 	if (__predict_false(!sc->link_active)) {
1635fb38d839Srin 		IFQ_PURGE(&ifp->if_snd);
1636d0d8f2a5Srin 		return;
1637d0d8f2a5Srin 	}
1638d0d8f2a5Srin 
1639fb38d839Srin 	struct tx_ring *txr = &sc->tx_rings[0]; /* queue 0 */
1640fb38d839Srin 	mutex_enter(&txr->txr_lock);
1641fb38d839Srin 	igc_tx_common_locked(ifp, txr, IGC_TX_START);
1642fb38d839Srin 	mutex_exit(&txr->txr_lock);
1643fb38d839Srin }
1644fb38d839Srin 
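/*
 * Choose a TX queue for if_transmit() by hashing the current CPU
 * index, so each CPU tends to stay on one ring.
 */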
1645fb38d839Srin static inline u_int
1646fb38d839Srin igc_select_txqueue(struct igc_softc *sc, struct mbuf *m __unused)
1647fb38d839Srin {
1648fb38d839Srin 	const u_int cpuid = cpu_index(curcpu());
1649fb38d839Srin 
1650fb38d839Srin 	return cpuid % sc->sc_nqueues;
1651fb38d839Srin }
1652fb38d839Srin 
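/*
 * if_transmit callback (multi-queue): stage the packet on the chosen
 * ring's pcq, then drain that ring under its TX lock.
 */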
1653fb38d839Srin static int
1654fb38d839Srin igc_transmit(struct ifnet *ifp, struct mbuf *m)
1655fb38d839Srin {
1656fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
1657fb38d839Srin 	const u_int qid = igc_select_txqueue(sc, m);
1658fb38d839Srin 	struct tx_ring *txr = &sc->tx_rings[qid];
1659fb38d839Srin 	struct igc_queue *q = txr->txr_igcq;
1660fb38d839Srin 
1661fb38d839Srin 	if (__predict_false(!pcq_put(txr->txr_interq, m))) {
1662fb38d839Srin 		IGC_QUEUE_EVENT(q, tx_pcq_drop, 1);
1663fb38d839Srin 		m_freem(m);
1664fb38d839Srin 		return ENOBUFS;
1665fb38d839Srin 	}
1666fb38d839Srin 
1667fb38d839Srin 	mutex_enter(&txr->txr_lock);
1668fb38d839Srin 	igc_tx_common_locked(ifp, txr, IGC_TX_TRANSMIT);
1669fb38d839Srin 	mutex_exit(&txr->txr_lock);
1670fb38d839Srin 
1671fb38d839Srin 	return 0;
1672fb38d839Srin }
1673fb38d839Srin 
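/*
 * Common TX enqueue path for igc_start()/igc_transmit(): for each
 * packet, load the DMA map, optionally emit a context descriptor for
 * checksum offload, then write one data descriptor per DMA segment,
 * flagging EOP/RS on the last. TDT is advanced once at the end if
 * anything was posted.
 */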
1674fb38d839Srin static void
1675fb38d839Srin igc_tx_common_locked(struct ifnet *ifp, struct tx_ring *txr, int caller)
1676fb38d839Srin {
1677fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
1678fb38d839Srin 	struct igc_queue *q = txr->txr_igcq;
1679fb38d839Srin 	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
1680fb38d839Srin 	int prod, free, last = -1;
1681fb38d839Srin 	bool post = false;
1682fb38d839Srin 
1683d0d8f2a5Srin 	prod = txr->next_avail_desc;
1684d0d8f2a5Srin 	free = txr->next_to_clean;
1685d0d8f2a5Srin 	if (free <= prod)
1686d0d8f2a5Srin 		free += sc->num_tx_desc;
1687d0d8f2a5Srin 	free -= prod;
1688d0d8f2a5Srin 
1689fb38d839Srin 	DPRINTF(TX, "%s: begin: msix %d prod %d n2c %d free %d\n",
1690fb38d839Srin 	    caller == IGC_TX_TRANSMIT ? "transmit" : "start",
1691fb38d839Srin 	    txr->me, prod, txr->next_to_clean, free);
1692d0d8f2a5Srin 
1693d0d8f2a5Srin 	for (;;) {
1694fb38d839Srin 		struct mbuf *m;
1695fb38d839Srin 
1696fb38d839Srin 		if (__predict_false(free <= IGC_MAX_SCATTER)) {
1697fb38d839Srin 			IGC_QUEUE_EVENT(q, tx_no_desc, 1);
1698d0d8f2a5Srin 			break;
1699d0d8f2a5Srin 		}
1700d0d8f2a5Srin 
1701fb38d839Srin 		if (caller == IGC_TX_TRANSMIT)
1702fb38d839Srin 			m = pcq_get(txr->txr_interq);
1703fb38d839Srin 		else
1704fb38d839Srin 			IFQ_DEQUEUE(&ifp->if_snd, m);
1705fb38d839Srin 		if (__predict_false(m == NULL))
1706d0d8f2a5Srin 			break;
1707d0d8f2a5Srin 
1708fb38d839Srin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[prod];
1709fb38d839Srin 		bus_dmamap_t map = txbuf->map;
1710d0d8f2a5Srin 
1711fb38d839Srin 		if (__predict_false(
1712fb38d839Srin 		    igc_load_mbuf(q, txr->txdma.dma_tag, map, m))) {
1713fb38d839Srin 			if (caller == IGC_TX_TRANSMIT)
1714fb38d839Srin 				IGC_QUEUE_EVENT(q, tx_pcq_drop, 1);
1715d0d8f2a5Srin 			m_freem(m);
1716be6f2fceSriastradh 			if_statinc_ref(ifp, nsr, if_oerrors);
1717d0d8f2a5Srin 			continue;
1718d0d8f2a5Srin 		}
1719d0d8f2a5Srin 
1720d0d8f2a5Srin 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
1721d0d8f2a5Srin 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1722d0d8f2a5Srin 
1723fb38d839Srin 		uint32_t ctx_cmd_type_len = 0, olinfo_status = 0;
1724fb38d839Srin 		if (igc_tx_ctx_setup(txr, m, prod, &ctx_cmd_type_len,
1725fb38d839Srin 		    &olinfo_status)) {
1726fb38d839Srin 			IGC_QUEUE_EVENT(q, tx_ctx, 1);
1727d0d8f2a5Srin 			/* Consume the first descriptor */
1728fb38d839Srin 			prod = igc_txdesc_incr(sc, prod);
1729d0d8f2a5Srin 			free--;
1730d0d8f2a5Srin 		}
1731fb38d839Srin 		for (int i = 0; i < map->dm_nsegs; i++) {
1732fb38d839Srin 			union igc_adv_tx_desc *txdesc = &txr->tx_base[prod];
1733d0d8f2a5Srin 
1734fb38d839Srin 			uint32_t cmd_type_len = ctx_cmd_type_len |
1735fb38d839Srin 			    IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DTYP_DATA |
1736d0d8f2a5Srin 			    IGC_ADVTXD_DCMD_DEXT | map->dm_segs[i].ds_len;
1737fb38d839Srin 			if (i == map->dm_nsegs - 1) {
1738fb38d839Srin 				cmd_type_len |=
1739fb38d839Srin 				    IGC_ADVTXD_DCMD_EOP | IGC_ADVTXD_DCMD_RS;
1740fb38d839Srin 			}
1741d0d8f2a5Srin 
1742fb38d839Srin 			igc_txdesc_sync(txr, prod,
1743fb38d839Srin 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1744fb38d839Srin 			htolem64(&txdesc->read.buffer_addr,
1745fb38d839Srin 			    map->dm_segs[i].ds_addr);
1746d0d8f2a5Srin 			htolem32(&txdesc->read.cmd_type_len, cmd_type_len);
1747d0d8f2a5Srin 			htolem32(&txdesc->read.olinfo_status, olinfo_status);
1748fb38d839Srin 			igc_txdesc_sync(txr, prod,
1749fb38d839Srin 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1750d0d8f2a5Srin 
1751d0d8f2a5Srin 			last = prod;
1752fb38d839Srin 			prod = igc_txdesc_incr(sc, prod);
1753d0d8f2a5Srin 		}
1754d0d8f2a5Srin 
1755d0d8f2a5Srin 		txbuf->m_head = m;
1756d0d8f2a5Srin 		txbuf->eop_index = last;
1757d0d8f2a5Srin 
1758fb38d839Srin 		bpf_mtap(ifp, m, BPF_D_OUT);
1759d0d8f2a5Srin 
1760be6f2fceSriastradh 		if_statadd_ref(ifp, nsr, if_obytes, m->m_pkthdr.len);
1761fb38d839Srin 		if (m->m_flags & M_MCAST)
1762be6f2fceSriastradh 			if_statinc_ref(ifp, nsr, if_omcasts);
1763fb38d839Srin 		IGC_QUEUE_EVENT(q, tx_packets, 1);
1764fb38d839Srin 		IGC_QUEUE_EVENT(q, tx_bytes, m->m_pkthdr.len);
1765fb38d839Srin 
1766fb38d839Srin 		free -= map->dm_nsegs;
1767fb38d839Srin 		post = true;
1768d0d8f2a5Srin 	}
1769d0d8f2a5Srin 
1770d0d8f2a5Srin 	if (post) {
1771d0d8f2a5Srin 		txr->next_avail_desc = prod;
1772d0d8f2a5Srin 		IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
1773d0d8f2a5Srin 	}
1774fb38d839Srin 
1775fb38d839Srin 	DPRINTF(TX, "%s: done : msix %d prod %d n2c %d free %d\n",
1776fb38d839Srin 	    caller == IGC_TX_TRANSMIT ? "transmit" : "start",
1777fb38d839Srin 	    txr->me, prod, txr->next_to_clean, free);
1778fb38d839Srin 
1779fb38d839Srin 	IF_STAT_PUTREF(ifp);
1780d0d8f2a5Srin }
1781d0d8f2a5Srin 
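/*
 * Reclaim transmitted descriptors: walk from next_to_clean, test the
 * DD bit in each packet's EOP descriptor, and free completed mbufs.
 * Returns true if the `limit' budget ran out before the ring was
 * fully cleaned, so the caller can reschedule.
 */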
1782fb38d839Srin static bool
1783fb38d839Srin igc_txeof(struct tx_ring *txr, u_int limit)
1784d0d8f2a5Srin {
1785d0d8f2a5Srin 	struct igc_softc *sc = txr->sc;
1786fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1787fb38d839Srin 	int cons, prod;
1788fb38d839Srin 	bool more = false;
1789d0d8f2a5Srin 
1790d0d8f2a5Srin 	prod = txr->next_avail_desc;
1791d0d8f2a5Srin 	cons = txr->next_to_clean;
1792d0d8f2a5Srin 
1793fb38d839Srin 	if (cons == prod) {
1794fb38d839Srin 		DPRINTF(TX, "false: msix %d cons %d prod %d\n",
1795fb38d839Srin 		    txr->me, cons, prod);
1796fb38d839Srin 		return false;
1797fb38d839Srin 	}
1798d0d8f2a5Srin 
1799d0d8f2a5Srin 	do {
1800fb38d839Srin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[cons];
1801fb38d839Srin 		const int last = txbuf->eop_index;
1802d0d8f2a5Srin 
1803fb38d839Srin 		membar_consumer();	/* XXXRO necessary? */
1804fb38d839Srin 
1805fb38d839Srin 		KASSERT(last != -1);
1806fb38d839Srin 		union igc_adv_tx_desc *txdesc = &txr->tx_base[last];
1807fb38d839Srin 		igc_txdesc_sync(txr, last, BUS_DMASYNC_POSTREAD);
1808fb38d839Srin 		const uint32_t status = le32toh(txdesc->wb.status);
1809fb38d839Srin 		igc_txdesc_sync(txr, last, BUS_DMASYNC_PREREAD);
1810fb38d839Srin 
1811fb38d839Srin 		if (!(status & IGC_TXD_STAT_DD))
1812d0d8f2a5Srin 			break;
1813d0d8f2a5Srin 
1814fb38d839Srin 		if (limit-- == 0) {
1815fb38d839Srin 			more = true;
1816fb38d839Srin 			DPRINTF(TX, "pending TX "
1817fb38d839Srin 			    "msix %d cons %d last %d prod %d "
1818fb38d839Srin 			    "status 0x%08x\n",
1819fb38d839Srin 			    txr->me, cons, last, prod, status);
1820fb38d839Srin 			break;
1821fb38d839Srin 		}
1822d0d8f2a5Srin 
1823fb38d839Srin 		DPRINTF(TX, "handled TX "
1824fb38d839Srin 		    "msix %d cons %d last %d prod %d "
1825fb38d839Srin 		    "status 0x%08x\n",
1826fb38d839Srin 		    txr->me, cons, last, prod, status);
1827fb38d839Srin 
1828fb38d839Srin 		if_statinc(ifp, if_opackets);
1829fb38d839Srin 
1830fb38d839Srin 		bus_dmamap_t map = txbuf->map;
1831d0d8f2a5Srin 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1832d0d8f2a5Srin 		    BUS_DMASYNC_POSTWRITE);
1833d0d8f2a5Srin 		bus_dmamap_unload(txr->txdma.dma_tag, map);
1834d0d8f2a5Srin 		m_freem(txbuf->m_head);
1835d0d8f2a5Srin 
1836d0d8f2a5Srin 		txbuf->m_head = NULL;
1837d0d8f2a5Srin 		txbuf->eop_index = -1;
1838d0d8f2a5Srin 
1839fb38d839Srin 		cons = igc_txdesc_incr(sc, last);
1840d0d8f2a5Srin 	} while (cons != prod);
1841d0d8f2a5Srin 
1842d0d8f2a5Srin 	txr->next_to_clean = cons;
1843d0d8f2a5Srin 
1844fb38d839Srin 	return more;
1845fb38d839Srin }
1846d0d8f2a5Srin 
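/*
 * Broadcast a no-op cross-call and wait for it; used by
 * igc_stop_locked() to wait for in-flight interrupt handlers to
 * drain.
 */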
1847fb38d839Srin static void
1848fb38d839Srin igc_intr_barrier(struct igc_softc *sc __unused)
1849fb38d839Srin {
1850fb38d839Srin 
1851fb38d839Srin 	xc_barrier(0);
1852fb38d839Srin }
1853fb38d839Srin 
1854fb38d839Srin static void
1855fb38d839Srin igc_stop(struct ifnet *ifp, int disable)
1856fb38d839Srin {
1857fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
1858fb38d839Srin 
1859fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
1860fb38d839Srin 	igc_stop_locked(sc);
1861fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
1862d0d8f2a5Srin }
1863d0d8f2a5Srin 
1864d0d8f2a5Srin /*********************************************************************
1865d0d8f2a5Srin  *
1866d0d8f2a5Srin  *  This routine disables all traffic on the adapter by issuing a
1867d0d8f2a5Srin  *  global reset on the MAC.
1868d0d8f2a5Srin  *
1869d0d8f2a5Srin  **********************************************************************/
1870fb38d839Srin static void
1871fb38d839Srin igc_stop_locked(struct igc_softc *sc)
1872d0d8f2a5Srin {
1873fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1874fb38d839Srin 
1875fb38d839Srin 	DPRINTF(CFG, "called\n");
1876fb38d839Srin 
1877fb38d839Srin 	KASSERT(mutex_owned(&sc->sc_core_lock));
1878fb38d839Srin 
1879fb38d839Srin 	/*
1880fb38d839Srin 	 * If stopping processing has already started, do nothing.
1881fb38d839Srin 	 */
1882fb38d839Srin 	if ((ifp->if_flags & IFF_RUNNING) == 0)
1883fb38d839Srin 		return;
1884d0d8f2a5Srin 
1885d0d8f2a5Srin 	/* Tell the stack that the interface is no longer active. */
1886d0d8f2a5Srin 	ifp->if_flags &= ~IFF_RUNNING;
1887d0d8f2a5Srin 
1888fb38d839Srin 	/*
1889fb38d839Srin 	 * igc_handle_queue() can re-enable interrupts, so wait for the last
1890fb38d839Srin 	 * igc_handle_queue() to complete after clearing IFF_RUNNING.
1891fb38d839Srin 	 */
1892fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
1893fb38d839Srin 	igc_barrier_handle_queue(sc);
1894fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
1895fb38d839Srin 
1896fb38d839Srin 	sc->sc_core_stopping = true;
1897fb38d839Srin 
1898d0d8f2a5Srin 	igc_disable_intr(sc);
1899d0d8f2a5Srin 
1900fb38d839Srin 	callout_halt(&sc->sc_tick_ch, &sc->sc_core_lock);
1901fb38d839Srin 
1902d0d8f2a5Srin 	igc_reset_hw(&sc->hw);
1903d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
1904d0d8f2a5Srin 
1905fb38d839Srin 	/*
1906fb38d839Srin 	 * Wait for completion of interrupt handlers.
1907fb38d839Srin 	 */
1908fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
1909fb38d839Srin 	igc_intr_barrier(sc);
1910fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
1911d0d8f2a5Srin 
1912d0d8f2a5Srin 	igc_update_link_status(sc);
1913fb38d839Srin 
1914fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1915fb38d839Srin 		struct tx_ring *txr = &sc->tx_rings[iq];
1916fb38d839Srin 
1917fb38d839Srin 		igc_withdraw_transmit_packets(txr, false);
1918fb38d839Srin 	}
1919fb38d839Srin 
1920fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
1921fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
1922fb38d839Srin 
1923fb38d839Srin 		igc_clear_receive_status(rxr);
1924fb38d839Srin 	}
1925fb38d839Srin 
1926fb38d839Srin 	/* Save last flags for the callback */
1927fb38d839Srin 	sc->sc_if_flags = ifp->if_flags;
1928d0d8f2a5Srin }
1929d0d8f2a5Srin 
1930d0d8f2a5Srin /*********************************************************************
1931d0d8f2a5Srin  *  Ioctl entry point
1932d0d8f2a5Srin  *
1933d0d8f2a5Srin  *  igc_ioctl is called when the user wants to configure the
1934d0d8f2a5Srin  *  interface.
1935d0d8f2a5Srin  *
1936d0d8f2a5Srin  *  return 0 on success, positive on failure
1937d0d8f2a5Srin  **********************************************************************/
1938fb38d839Srin static int
1939fb38d839Srin igc_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1940d0d8f2a5Srin {
1941fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
1942fb38d839Srin 	int s;
1943fb38d839Srin 	int error;
1944d0d8f2a5Srin 
1945fb38d839Srin 	DPRINTF(CFG, "cmd 0x%016lx\n", cmd);
1946d0d8f2a5Srin 
1947d0d8f2a5Srin 	switch (cmd) {
1948fb38d839Srin 	case SIOCADDMULTI:
1949fb38d839Srin 	case SIOCDELMULTI:
1950d0d8f2a5Srin 		break;
1951d0d8f2a5Srin 	default:
1952fb38d839Srin 		KASSERT(IFNET_LOCKED(ifp));
1953d0d8f2a5Srin 	}
1954d0d8f2a5Srin 
1955fb38d839Srin 	if (cmd == SIOCZIFDATA) {
1956fb38d839Srin 		mutex_enter(&sc->sc_core_lock);
1957fb38d839Srin 		igc_clear_counters(sc);
1958fb38d839Srin 		mutex_exit(&sc->sc_core_lock);
1959fb38d839Srin 	}
1960fb38d839Srin 
1961fb38d839Srin 	switch (cmd) {
1962fb38d839Srin #ifdef IF_RXR
1963fb38d839Srin 	case SIOCGIFRXR:
1964fb38d839Srin 		s = splnet();
1965fb38d839Srin 		error = igc_rxrinfo(sc,
1965fb38d839Srin 		    (struct if_rxrinfo *)((struct ifreq *)data)->ifr_data);
1966fb38d839Srin 		splx(s);
1967fb38d839Srin 		break;
1968fb38d839Srin #endif
1969fb38d839Srin 	default:
1970fb38d839Srin 		s = splnet();
1971fb38d839Srin 		error = ether_ioctl(ifp, cmd, data);
1972fb38d839Srin 		splx(s);
1973fb38d839Srin 		break;
1974fb38d839Srin 	}
1975fb38d839Srin 
1976fb38d839Srin 	if (error != ENETRESET)
1977fb38d839Srin 		return error;
1978fb38d839Srin 
1979fb38d839Srin 	error = 0;
1980fb38d839Srin 
1981fb38d839Srin 	if (cmd == SIOCSIFCAP)
1982fb38d839Srin 		error = if_init(ifp);
1983fb38d839Srin 	else if ((cmd == SIOCADDMULTI) || (cmd == SIOCDELMULTI)) {
1984fb38d839Srin 		mutex_enter(&sc->sc_core_lock);
1985fb38d839Srin 		if (sc->sc_if_flags & IFF_RUNNING) {
1986fb38d839Srin 			/*
1987fb38d839Srin 			 * Multicast list has changed; set the hardware filter
1988fb38d839Srin 			 * accordingly.
1989fb38d839Srin 			 */
1990d0d8f2a5Srin 			igc_disable_intr(sc);
1991fb38d839Srin 			igc_set_filter(sc);
1992d0d8f2a5Srin 			igc_enable_intr(sc);
1993d0d8f2a5Srin 		}
1994fb38d839Srin 		mutex_exit(&sc->sc_core_lock);
1995d0d8f2a5Srin 	}
1996d0d8f2a5Srin 
1997d0d8f2a5Srin 	return error;
1998d0d8f2a5Srin }
1999d0d8f2a5Srin 
2000fb38d839Srin #ifdef IF_RXR
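/*
 * OpenBSD-style SIOCGIFRXR reporting, kept under #ifdef IF_RXR and
 * therefore compiled out where the if_rxr API is not present.
 */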
2001fb38d839Srin static int
2002d0d8f2a5Srin igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
2003d0d8f2a5Srin {
2004fb38d839Srin 	struct if_rxring_info *ifr, ifr1;
2005fb38d839Srin 	int error;
2006d0d8f2a5Srin 
2007fb38d839Srin 	if (sc->sc_nqueues > 1) {
2008fb38d839Srin 		ifr = kmem_zalloc(sc->sc_nqueues * sizeof(*ifr), KM_SLEEP);
2009fb38d839Srin 	} else {
2010fb38d839Srin 		ifr = &ifr1;
2011fb38d839Srin 		memset(ifr, 0, sizeof(*ifr));
2012fb38d839Srin 	}
2013d0d8f2a5Srin 
2014fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2015fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
2016fb38d839Srin 
2017fb38d839Srin 		ifr[iq].ifr_size = MCLBYTES;
2018fb38d839Srin 		snprintf(ifr[iq].ifr_name, sizeof(ifr[iq].ifr_name), "%d", iq);
2019fb38d839Srin 		ifr[iq].ifr_info = rxr->rx_ring;
2020d0d8f2a5Srin 	}
2021d0d8f2a5Srin 
2022d0d8f2a5Srin 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
2023fb38d839Srin 	if (sc->sc_nqueues > 1)
2024fb38d839Srin 		kmem_free(ifr, sc->sc_nqueues * sizeof(*ifr));
2025d0d8f2a5Srin 
2026d0d8f2a5Srin 	return error;
2027d0d8f2a5Srin }
2028fb38d839Srin #endif
2029d0d8f2a5Srin 
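/*
 * Fill every RX slot with a fresh mbuf cluster and hand the whole
 * ring to the hardware; used from igc_init_locked(), where a failure
 * is fatal. igc_rxrefill() below is the incremental runtime variant.
 */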
2030fb38d839Srin static void
2031d0d8f2a5Srin igc_rxfill(struct rx_ring *rxr)
2032d0d8f2a5Srin {
2033d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
2034fb38d839Srin 	int id;
2035d0d8f2a5Srin 
2036fb38d839Srin 	for (id = 0; id < sc->num_rx_desc; id++) {
2037fb38d839Srin 		if (igc_get_buf(rxr, id, false)) {
2038fb38d839Srin 			panic("%s: msix=%d i=%d\n", __func__, rxr->me, id);
2039fb38d839Srin 			panic("%s: msix=%d id=%d\n", __func__, rxr->me, id);
2040d0d8f2a5Srin 	}
2041d0d8f2a5Srin 
2042fb38d839Srin 	id = sc->num_rx_desc - 1;
2043fb38d839Srin 	rxr->last_desc_filled = id;
2044fb38d839Srin 	IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
2045fb38d839Srin 	rxr->next_to_check = 0;
2046d0d8f2a5Srin }
2047d0d8f2a5Srin 
2048fb38d839Srin static void
2049fb38d839Srin igc_rxrefill(struct rx_ring *rxr, int end)
2050d0d8f2a5Srin {
2051d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
2052fb38d839Srin 	int id;
2053d0d8f2a5Srin 
2054fb38d839Srin 	for (id = rxr->next_to_check; id != end; id = igc_rxdesc_incr(sc, id)) {
2055fb38d839Srin 		if (igc_get_buf(rxr, id, true)) {
2056fb38d839Srin 			/* XXXRO */
2057fb38d839Srin 			panic("%s: msix=%d id=%d\n", __func__, rxr->me, id);
2058d0d8f2a5Srin 		}
2059fb38d839Srin 	}
2060fb38d839Srin 
2061fb38d839Srin 	id = igc_rxdesc_decr(sc, id);
2062fb38d839Srin 	DPRINTF(RX, "%s RDT %d id %d\n",
2063fb38d839Srin 	    rxr->last_desc_filled == id ? "same" : "diff",
2064fb38d839Srin 	    rxr->last_desc_filled, id);
2065fb38d839Srin 	rxr->last_desc_filled = id;
2066fb38d839Srin 	IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me), id);
2067d0d8f2a5Srin }
2068d0d8f2a5Srin 
2069d0d8f2a5Srin /*********************************************************************
2070d0d8f2a5Srin  *
2071d0d8f2a5Srin  *  This routine executes in interrupt context. It replenishes
2072d0d8f2a5Srin  *  the mbufs in the descriptor and sends data which has been
2073d0d8f2a5Srin  *  dma'ed into host memory to upper layer.
2074d0d8f2a5Srin  *
2075d0d8f2a5Srin  *********************************************************************/
2076fb38d839Srin static bool
2077fb38d839Srin igc_rxeof(struct rx_ring *rxr, u_int limit)
2078d0d8f2a5Srin {
2079d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
2080fb38d839Srin 	struct igc_queue *q = rxr->rxr_igcq;
2081fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2082fb38d839Srin 	int id;
2083fb38d839Srin 	bool more = false;
2084fb38d839Srin 
2085fb38d839Srin 	id = rxr->next_to_check;
2086fb38d839Srin 	for (;;) {
2087fb38d839Srin 		union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
2088d0d8f2a5Srin 		struct igc_rx_buf *rxbuf, *nxbuf;
2089fb38d839Srin 		struct mbuf *mp, *m;
2090d0d8f2a5Srin 
2091fb38d839Srin 		igc_rxdesc_sync(rxr, id,
2092fb38d839Srin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2093d0d8f2a5Srin 
2094fb38d839Srin 		const uint32_t staterr = le32toh(rxdesc->wb.upper.status_error);
2095d0d8f2a5Srin 
2096d0d8f2a5Srin 		if (!ISSET(staterr, IGC_RXD_STAT_DD)) {
2097fb38d839Srin 			igc_rxdesc_sync(rxr, id,
2098fb38d839Srin 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2099fb38d839Srin 			break;
2100fb38d839Srin 		}
2101fb38d839Srin 
2102fb38d839Srin 		if (limit-- == 0) {
2103fb38d839Srin 			igc_rxdesc_sync(rxr, id,
2104fb38d839Srin 			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2105fb38d839Srin 			DPRINTF(RX, "more=true\n");
2106fb38d839Srin 			more = true;
2107d0d8f2a5Srin 			break;
2108d0d8f2a5Srin 		}
2109d0d8f2a5Srin 
2110d0d8f2a5Srin 		/* Zero out the receive descriptors status. */
2111d0d8f2a5Srin 		rxdesc->wb.upper.status_error = 0;
2112d0d8f2a5Srin 
2113d0d8f2a5Srin 		/* Pull the mbuf off the ring. */
2114fb38d839Srin 		rxbuf = &rxr->rx_buffers[id];
2115fb38d839Srin 		bus_dmamap_t map = rxbuf->map;
2116fb38d839Srin 		bus_dmamap_sync(rxr->rxdma.dma_tag, map,
2117fb38d839Srin 		    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2118fb38d839Srin 		bus_dmamap_unload(rxr->rxdma.dma_tag, map);
2119d0d8f2a5Srin 
2120d0d8f2a5Srin 		mp = rxbuf->buf;
2121fb38d839Srin 		rxbuf->buf = NULL;
2122fb38d839Srin 
2123fb38d839Srin 		const bool eop = staterr & IGC_RXD_STAT_EOP;
2124fb38d839Srin 		const uint16_t len = le16toh(rxdesc->wb.upper.length);
2125fb38d839Srin 
21265c462e97Soster #if NVLAN > 0
2127fb38d839Srin 		const uint16_t vtag = le16toh(rxdesc->wb.upper.vlan);
21285c462e97Soster #endif
2129fb38d839Srin 
2130fb38d839Srin 		const uint32_t ptype = le32toh(rxdesc->wb.lower.lo_dword.data) &
2131d0d8f2a5Srin 		    IGC_PKTTYPE_MASK;
2132fb38d839Srin 
2133fb38d839Srin 		const uint32_t hash __unused =
2134fb38d839Srin 		    le32toh(rxdesc->wb.lower.hi_dword.rss);
2135fb38d839Srin 		const uint16_t hashtype __unused =
2136fb38d839Srin 		    le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
2137d0d8f2a5Srin 		    IGC_RXDADV_RSSTYPE_MASK;
2138d0d8f2a5Srin 
2139fb38d839Srin 		igc_rxdesc_sync(rxr, id,
2140fb38d839Srin 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2141fb38d839Srin 
2142fb38d839Srin 		if (__predict_false(staterr & IGC_RXDEXT_STATERR_RXE)) {
2143d0d8f2a5Srin 			m_freem(rxbuf->fmp);
2144d0d8f2a5Srin 			rxbuf->fmp = NULL;
2145d0d8f2a5Srin 
2146d0d8f2a5Srin 			m_freem(mp);
2147fb38d839Srin 			m = NULL;
2148fb38d839Srin 
2149fb38d839Srin 			if_statinc(ifp, if_ierrors);
2150fb38d839Srin 			IGC_QUEUE_EVENT(q, rx_discard, 1);
2151fb38d839Srin 
2152fb38d839Srin 			DPRINTF(RX, "ierrors++\n");
2153fb38d839Srin 
2154d0d8f2a5Srin 			goto next_desc;
2155d0d8f2a5Srin 		}
2156d0d8f2a5Srin 
2157fb38d839Srin 		if (__predict_false(mp == NULL)) {
2158d0d8f2a5Srin 			panic("%s: igc_rxeof: NULL mbuf in slot %d "
2159fb38d839Srin 			    "(filled %d)", device_xname(sc->sc_dev),
2160fb38d839Srin 			    id, rxr->last_desc_filled);
2161d0d8f2a5Srin 		}
2162d0d8f2a5Srin 
2163d0d8f2a5Srin 		if (!eop) {
2164d0d8f2a5Srin 			/*
2165d0d8f2a5Srin 			 * Figure out the next descriptor of this frame.
2166d0d8f2a5Srin 			 */
2167fb38d839Srin 			int nextp = igc_rxdesc_incr(sc, id);
2168fb38d839Srin 
2169d0d8f2a5Srin 			nxbuf = &rxr->rx_buffers[nextp];
2170fb38d839Srin 			/*
2171fb38d839Srin 			 * TODO prefetch(nxbuf);
2172fb38d839Srin 			 */
2173d0d8f2a5Srin 		}
2174d0d8f2a5Srin 
2175d0d8f2a5Srin 		mp->m_len = len;
2176d0d8f2a5Srin 
2177d0d8f2a5Srin 		m = rxbuf->fmp;
2178fb38d839Srin 		rxbuf->fmp = NULL;
2179d0d8f2a5Srin 
2180fb38d839Srin 		if (m != NULL) {
2181d0d8f2a5Srin 			m->m_pkthdr.len += mp->m_len;
2182fb38d839Srin 		} else {
2183d0d8f2a5Srin 			m = mp;
2184d0d8f2a5Srin 			m->m_pkthdr.len = mp->m_len;
2185d0d8f2a5Srin #if NVLAN > 0
2186fb38d839Srin 			if (staterr & IGC_RXD_STAT_VP)
2187fb38d839Srin 				vlan_set_tag(m, vtag);
2188d0d8f2a5Srin #endif
2189d0d8f2a5Srin 		}
2190d0d8f2a5Srin 
2191d0d8f2a5Srin 		/* Pass the head pointer on */
2192fb38d839Srin 		if (!eop) {
2193d0d8f2a5Srin 			nxbuf->fmp = m;
2194d0d8f2a5Srin 			m = NULL;
2195d0d8f2a5Srin 			mp->m_next = nxbuf->buf;
2196d0d8f2a5Srin 		} else {
2197fb38d839Srin 			m_set_rcvif(m, ifp);
2198d0d8f2a5Srin 
2199fb38d839Srin 			m->m_pkthdr.csum_flags = igc_rx_checksum(q,
2200fb38d839Srin 			    ifp->if_capenable, staterr, ptype);
2201fb38d839Srin 
2202fb38d839Srin #ifdef notyet
2203d0d8f2a5Srin 			if (hashtype != IGC_RXDADV_RSSTYPE_NONE) {
2204d0d8f2a5Srin 				m->m_pkthdr.ph_flowid = hash;
2205d0d8f2a5Srin 				SET(m->m_pkthdr.csum_flags, M_FLOWID);
2206d0d8f2a5Srin 			}
2207d0d8f2a5Srin 			ml_enqueue(&ml, m);
2208fb38d839Srin #endif
2209fb38d839Srin 
2210fb38d839Srin 			if_percpuq_enqueue(sc->sc_ipq, m);
2211fb38d839Srin 
2212fb38d839Srin 			if_statinc(ifp, if_ipackets);
2213fb38d839Srin 			IGC_QUEUE_EVENT(q, rx_packets, 1);
2214fb38d839Srin 			IGC_QUEUE_EVENT(q, rx_bytes, m->m_pkthdr.len);
2215d0d8f2a5Srin 		}
2216d0d8f2a5Srin  next_desc:
2217d0d8f2a5Srin 		/* Advance our pointers to the next descriptor. */
2218fb38d839Srin 		id = igc_rxdesc_incr(sc, id);
2219d0d8f2a5Srin 	}
2220d0d8f2a5Srin 
2221fb38d839Srin 	DPRINTF(RX, "fill queue[%d]\n", rxr->me);
2222fb38d839Srin 	igc_rxrefill(rxr, id);
2223d0d8f2a5Srin 
2224fb38d839Srin 	DPRINTF(RX, "%s n2c %d id %d\n",
2225fb38d839Srin 	    rxr->next_to_check == id ? "same" : "diff",
2226fb38d839Srin 	    rxr->next_to_check, id);
2227fb38d839Srin 	rxr->next_to_check = id;
2228fb38d839Srin 
2229fb38d839Srin #ifdef OPENBSD
2230d0d8f2a5Srin 	if (!(staterr & IGC_RXD_STAT_DD))
2231d0d8f2a5Srin 		return 0;
2232fb38d839Srin #endif
2233d0d8f2a5Srin 
2234fb38d839Srin 	return more;
2235d0d8f2a5Srin }
2236d0d8f2a5Srin 
2237d0d8f2a5Srin /*********************************************************************
2238d0d8f2a5Srin  *
2239d0d8f2a5Srin  *  Verify that the hardware indicated that the checksum is valid.
2240d0d8f2a5Srin  *  Inform the stack about the status of checksum so that stack
2241d0d8f2a5Srin  *  doesn't spend time verifying the checksum.
2242d0d8f2a5Srin  *
2243d0d8f2a5Srin  *********************************************************************/
2244fb38d839Srin static int
2245fb38d839Srin igc_rx_checksum(struct igc_queue *q, uint64_t capenable, uint32_t staterr,
2246fb38d839Srin     uint32_t ptype)
2247d0d8f2a5Srin {
2248fb38d839Srin 	const uint16_t status = (uint16_t)staterr;
2249fb38d839Srin 	const uint8_t errors = (uint8_t)(staterr >> 24);
2250fb38d839Srin 	int flags = 0;
2251d0d8f2a5Srin 
2252fb38d839Srin 	if ((status & IGC_RXD_STAT_IPCS) != 0 &&
2253fb38d839Srin 	    (capenable & IFCAP_CSUM_IPv4_Rx) != 0) {
2254fb38d839Srin 		IGC_DRIVER_EVENT(q, rx_ipcs, 1);
2255fb38d839Srin 		flags |= M_CSUM_IPv4;
2256fb38d839Srin 		if (__predict_false((errors & IGC_RXD_ERR_IPE) != 0)) {
2257fb38d839Srin 			IGC_DRIVER_EVENT(q, rx_ipcs_bad, 1);
2258fb38d839Srin 			flags |= M_CSUM_IPv4_BAD;
2259d0d8f2a5Srin 		}
2260d0d8f2a5Srin 	}
2261d0d8f2a5Srin 
2262fb38d839Srin 	if ((status & IGC_RXD_STAT_TCPCS) != 0) {
2263fb38d839Srin 		IGC_DRIVER_EVENT(q, rx_tcpcs, 1);
2264fb38d839Srin 		if ((capenable & IFCAP_CSUM_TCPv4_Rx) != 0)
2265fb38d839Srin 			flags |= M_CSUM_TCPv4;
2266fb38d839Srin 		if ((capenable & IFCAP_CSUM_TCPv6_Rx) != 0)
2267fb38d839Srin 			flags |= M_CSUM_TCPv6;
2268fb38d839Srin 	}
2269fb38d839Srin 
2270fb38d839Srin 	if ((status & IGC_RXD_STAT_UDPCS) != 0) {
2271fb38d839Srin 		IGC_DRIVER_EVENT(q, rx_udpcs, 1);
2272fb38d839Srin 		if ((capenable & IFCAP_CSUM_UDPv4_Rx) != 0)
2273fb38d839Srin 			flags |= M_CSUM_UDPv4;
2274fb38d839Srin 		if ((capenable & IFCAP_CSUM_UDPv6_Rx) != 0)
2275fb38d839Srin 			flags |= M_CSUM_UDPv6;
2276fb38d839Srin 	}
2277fb38d839Srin 
2278fb38d839Srin 	if (__predict_false((errors & IGC_RXD_ERR_TCPE) != 0)) {
2279fb38d839Srin 		IGC_DRIVER_EVENT(q, rx_l4cs_bad, 1);
2280fb38d839Srin 		if ((flags & ~M_CSUM_IPv4) != 0)
2281fb38d839Srin 			flags |= M_CSUM_TCP_UDP_BAD;
2282fb38d839Srin 	}
2283fb38d839Srin 
2284fb38d839Srin 	return flags;
2285fb38d839Srin }
2286fb38d839Srin 
2287fb38d839Srin static void
2288d0d8f2a5Srin igc_watchdog(struct ifnet *ifp)
2289d0d8f2a5Srin {
2290d0d8f2a5Srin }
2291d0d8f2a5Srin 
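/*
 * Once-a-second callout: update the statistics counters under the
 * core lock and reschedule, unless the interface is stopping.
 */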
2292fb38d839Srin static void
2293fb38d839Srin igc_tick(void *arg)
2294fb38d839Srin {
2295fb38d839Srin 	struct igc_softc *sc = arg;
2296fb38d839Srin 
2297fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
2298fb38d839Srin 
2299fb38d839Srin 	if (__predict_false(sc->sc_core_stopping)) {
2300fb38d839Srin 		mutex_exit(&sc->sc_core_lock);
2301fb38d839Srin 		return;
2302fb38d839Srin 	}
2303fb38d839Srin 
2304fb38d839Srin 	/* XXX watchdog */
2305fb38d839Srin 	if (0) {
2306fb38d839Srin 		IGC_GLOBAL_EVENT(sc, watchdog, 1);
2307fb38d839Srin 	}
2308fb38d839Srin 
2309fb38d839Srin 	igc_update_counters(sc);
2310fb38d839Srin 
2311fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
2312fb38d839Srin 
2313fb38d839Srin 	callout_schedule(&sc->sc_tick_ch, hz);
2314fb38d839Srin }
2315fb38d839Srin 
2316d0d8f2a5Srin /*********************************************************************
2317d0d8f2a5Srin  *
2318d0d8f2a5Srin  *  Media Ioctl callback
2319d0d8f2a5Srin  *
2320d0d8f2a5Srin  *  This routine is called whenever the user queries the status of
2321d0d8f2a5Srin  *  the interface using ifconfig.
2322d0d8f2a5Srin  *
2323d0d8f2a5Srin  **********************************************************************/
2324fb38d839Srin static void
2325d0d8f2a5Srin igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
2326d0d8f2a5Srin {
2327d0d8f2a5Srin 	struct igc_softc *sc = ifp->if_softc;
2328fb38d839Srin 	struct igc_hw *hw = &sc->hw;
2329d0d8f2a5Srin 
2330d0d8f2a5Srin 	igc_update_link_status(sc);
2331d0d8f2a5Srin 
2332d0d8f2a5Srin 	ifmr->ifm_status = IFM_AVALID;
2333d0d8f2a5Srin 	ifmr->ifm_active = IFM_ETHER;
2334d0d8f2a5Srin 
2335d0d8f2a5Srin 	if (!sc->link_active) {
2336d0d8f2a5Srin 		ifmr->ifm_active |= IFM_NONE;
2337d0d8f2a5Srin 		return;
2338d0d8f2a5Srin 	}
2339d0d8f2a5Srin 
2340d0d8f2a5Srin 	ifmr->ifm_status |= IFM_ACTIVE;
2341d0d8f2a5Srin 
2342d0d8f2a5Srin 	switch (sc->link_speed) {
2343d0d8f2a5Srin 	case 10:
2344d0d8f2a5Srin 		ifmr->ifm_active |= IFM_10_T;
2345d0d8f2a5Srin 		break;
2346d0d8f2a5Srin 	case 100:
2347d0d8f2a5Srin 		ifmr->ifm_active |= IFM_100_TX;
2348d0d8f2a5Srin 		break;
2349d0d8f2a5Srin 	case 1000:
2350d0d8f2a5Srin 		ifmr->ifm_active |= IFM_1000_T;
2351d0d8f2a5Srin 		break;
2352d0d8f2a5Srin 	case 2500:
2353d0d8f2a5Srin 		ifmr->ifm_active |= IFM_2500_T;
2354d0d8f2a5Srin 		break;
2355d0d8f2a5Srin 	}
2356d0d8f2a5Srin 
2357d0d8f2a5Srin 	if (sc->link_duplex == FULL_DUPLEX)
2358d0d8f2a5Srin 		ifmr->ifm_active |= IFM_FDX;
2359d0d8f2a5Srin 	else
2360d0d8f2a5Srin 		ifmr->ifm_active |= IFM_HDX;
2361fb38d839Srin 
2362fb38d839Srin 	switch (hw->fc.current_mode) {
2363fb38d839Srin 	case igc_fc_tx_pause:
2364fb38d839Srin 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
2365fb38d839Srin 		break;
2366fb38d839Srin 	case igc_fc_rx_pause:
2367fb38d839Srin 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
2368fb38d839Srin 		break;
2369fb38d839Srin 	case igc_fc_full:
2370fb38d839Srin 		ifmr->ifm_active |= IFM_FLOW |
2371fb38d839Srin 		    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
2372fb38d839Srin 		break;
2373fb38d839Srin 	case igc_fc_none:
2374fb38d839Srin 	default:
2375fb38d839Srin 		break;
2376fb38d839Srin 	}
2377d0d8f2a5Srin }
2378d0d8f2a5Srin 
2379d0d8f2a5Srin /*********************************************************************
2380d0d8f2a5Srin  *
2381d0d8f2a5Srin  *  Media Ioctl callback
2382d0d8f2a5Srin  *
2383d0d8f2a5Srin  *  This routine is called when the user changes speed/duplex using
2384d0d8f2a5Srin  *  media/mediaopt options with ifconfig.
2385d0d8f2a5Srin  *
2386d0d8f2a5Srin  **********************************************************************/
2387fb38d839Srin static int
2388d0d8f2a5Srin igc_media_change(struct ifnet *ifp)
2389d0d8f2a5Srin {
2390d0d8f2a5Srin 	struct igc_softc *sc = ifp->if_softc;
2391d0d8f2a5Srin 	struct ifmedia *ifm = &sc->media;
2392d0d8f2a5Srin 
2393d0d8f2a5Srin 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
2394fb38d839Srin 		return EINVAL;
2395d0d8f2a5Srin 
2396d0d8f2a5Srin 	sc->hw.mac.autoneg = DO_AUTO_NEG;
2397d0d8f2a5Srin 
2398d0d8f2a5Srin 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
2399d0d8f2a5Srin 	case IFM_AUTO:
2400d0d8f2a5Srin 		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
2401d0d8f2a5Srin 		break;
2402d0d8f2a5Srin 	case IFM_2500_T:
2403d0d8f2a5Srin 		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
2404d0d8f2a5Srin 		break;
2405d0d8f2a5Srin 	case IFM_1000_T:
2406d0d8f2a5Srin 		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
2407d0d8f2a5Srin 		break;
2408d0d8f2a5Srin 	case IFM_100_TX:
2409e736465aSmsaitoh 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2410d0d8f2a5Srin 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
2411e736465aSmsaitoh 		else
2412e736465aSmsaitoh 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
2413d0d8f2a5Srin 		break;
2414d0d8f2a5Srin 	case IFM_10_T:
2415e736465aSmsaitoh 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
2416d0d8f2a5Srin 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
2417e736465aSmsaitoh 		else
2418e736465aSmsaitoh 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
2419d0d8f2a5Srin 		break;
2420d0d8f2a5Srin 	default:
2421d0d8f2a5Srin 		return EINVAL;
2422d0d8f2a5Srin 	}
2423d0d8f2a5Srin 
2424fb38d839Srin 	igc_init_locked(sc);
2425d0d8f2a5Srin 
2426d0d8f2a5Srin 	return 0;
2427d0d8f2a5Srin }
2428d0d8f2a5Srin 
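/*
 * ether_set_ifflags_cb() hook: diff the new if_flags against the
 * cached copy, handle IFF_PROMISC changes directly, and return
 * ENETRESET to force a full reinit for anything more invasive.
 */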
2429fb38d839Srin static int
2430fb38d839Srin igc_ifflags_cb(struct ethercom *ec)
2431d0d8f2a5Srin {
2432fb38d839Srin 	struct ifnet *ifp = &ec->ec_if;
2433fb38d839Srin 	struct igc_softc *sc = ifp->if_softc;
2434fb38d839Srin 	int rc = 0;
2435fb38d839Srin 	u_short iffchange;
2436fb38d839Srin 	bool needreset = false;
2437fb38d839Srin 
2438fb38d839Srin 	DPRINTF(CFG, "called\n");
2439fb38d839Srin 
2440fb38d839Srin 	KASSERT(IFNET_LOCKED(ifp));
2441fb38d839Srin 
2442fb38d839Srin 	mutex_enter(&sc->sc_core_lock);
2443fb38d839Srin 
2444fb38d839Srin 	/*
2445fb38d839Srin 	 * Check for if_flags.
2446fb38d839Srin 	 * Main usage is to prevent linkdown when opening bpf.
2447fb38d839Srin 	 */
2448fb38d839Srin 	iffchange = ifp->if_flags ^ sc->sc_if_flags;
2449fb38d839Srin 	sc->sc_if_flags = ifp->if_flags;
2450fb38d839Srin 	if ((iffchange & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0) {
2451fb38d839Srin 		needreset = true;
2452fb38d839Srin 		goto ec;
2453fb38d839Srin 	}
2454fb38d839Srin 
2455fb38d839Srin 	/* iff related updates */
2456fb38d839Srin 	if ((iffchange & IFF_PROMISC) != 0)
2457fb38d839Srin 		igc_set_filter(sc);
2458fb38d839Srin 
2459fb38d839Srin #ifdef notyet
2460fb38d839Srin 	igc_set_vlan(sc);
2461fb38d839Srin #endif
2462fb38d839Srin 
2463fb38d839Srin ec:
2464fb38d839Srin #ifdef notyet
2465fb38d839Srin 	/* Check for ec_capenable. */
2466fb38d839Srin 	ecchange = ec->ec_capenable ^ sc->sc_ec_capenable;
2467fb38d839Srin 	sc->sc_ec_capenable = ec->ec_capenable;
2468fb38d839Srin 	if ((ecchange & ~ETHERCAP_SOMETHING) != 0) {
2469fb38d839Srin 		needreset = true;
2470fb38d839Srin 		goto out;
2471fb38d839Srin 	}
2472fb38d839Srin #endif
2473fb38d839Srin 	if (needreset)
2474fb38d839Srin 		rc = ENETRESET;
2475fb38d839Srin 
2476fb38d839Srin 	mutex_exit(&sc->sc_core_lock);
2477fb38d839Srin 
2478fb38d839Srin 	return rc;
2479fb38d839Srin }
2480fb38d839Srin 
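/*
 * Program RCTL and the multicast table array from the interface
 * state: broadcast accept, promiscuous mode, and the exact-match
 * multicast list, falling back to all-multicast when a range is
 * requested or the list overflows.
 */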
2481fb38d839Srin static void
2482fb38d839Srin igc_set_filter(struct igc_softc *sc)
2483fb38d839Srin {
2484fb38d839Srin 	struct ethercom *ec = &sc->sc_ec;
2485fb38d839Srin 	uint32_t rctl;
2486fb38d839Srin 
2487fb38d839Srin 	rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
2488fb38d839Srin 	rctl &= ~(IGC_RCTL_BAM | IGC_RCTL_UPE | IGC_RCTL_MPE);
2489fb38d839Srin 
2490fb38d839Srin 	if ((sc->sc_if_flags & IFF_BROADCAST) != 0)
2491fb38d839Srin 		rctl |= IGC_RCTL_BAM;
2492fb38d839Srin 	if ((sc->sc_if_flags & IFF_PROMISC) != 0) {
2493fb38d839Srin 		DPRINTF(CFG, "promisc\n");
2494fb38d839Srin 		rctl |= IGC_RCTL_UPE;
2495fb38d839Srin 		ETHER_LOCK(ec);
2496fb38d839Srin  allmulti:
2497fb38d839Srin 		ec->ec_flags |= ETHER_F_ALLMULTI;
2498fb38d839Srin 		ETHER_UNLOCK(ec);
2499fb38d839Srin 		rctl |= IGC_RCTL_MPE;
2500fb38d839Srin 	} else {
2501d0d8f2a5Srin 		struct ether_multistep step;
2502fb38d839Srin 		struct ether_multi *enm;
2503d0d8f2a5Srin 		int mcnt = 0;
2504d0d8f2a5Srin 
2505fb38d839Srin 		memset(sc->mta, 0, IGC_MTA_LEN);
2506d0d8f2a5Srin 
2507fb38d839Srin 		ETHER_LOCK(ec);
2508fb38d839Srin 		ETHER_FIRST_MULTI(step, ec, enm);
2509d0d8f2a5Srin 		while (enm != NULL) {
2510fb38d839Srin 			if (((memcmp(enm->enm_addrlo, enm->enm_addrhi,
2511fb38d839Srin 					ETHER_ADDR_LEN)) != 0) ||
2512fb38d839Srin 			    (mcnt >= MAX_NUM_MULTICAST_ADDRESSES)) {
2513fb38d839Srin 				/*
2514fb38d839Srin 				 * We must listen to a range of multicast
2515fb38d839Srin 				 * addresses. For now, just accept all
2516fb38d839Srin 				 * multicasts, rather than trying to set only
2517fb38d839Srin 				 * those filter bits needed to match the range.
2518fb38d839Srin 				 * (At this time, the only use of address
2519fb38d839Srin 				 * ranges is for IP multicast routing, for
2520fb38d839Srin 				 * which the range is big enough to require all
2521fb38d839Srin 				 * bits set.)
2522fb38d839Srin 				 */
2523fb38d839Srin 				goto allmulti;
2524fb38d839Srin 			}
2525fb38d839Srin 			DPRINTF(CFG, "%d: %s\n", mcnt,
2526fb38d839Srin 			    ether_sprintf(enm->enm_addrlo));
2527fb38d839Srin 			memcpy(&sc->mta[mcnt * ETHER_ADDR_LEN],
2528fb38d839Srin 			    enm->enm_addrlo, ETHER_ADDR_LEN);
2529d0d8f2a5Srin 
2530fb38d839Srin 			mcnt++;
2531d0d8f2a5Srin 			ETHER_NEXT_MULTI(step, enm);
2532d0d8f2a5Srin 		}
2533fb38d839Srin 		ec->ec_flags &= ~ETHER_F_ALLMULTI;
2534fb38d839Srin 		ETHER_UNLOCK(ec);
2535d0d8f2a5Srin 
2536fb38d839Srin 		DPRINTF(CFG, "hw filter\n");
2537fb38d839Srin 		igc_update_mc_addr_list(&sc->hw, sc->mta, mcnt);
2538d0d8f2a5Srin 	}
2539d0d8f2a5Srin 
2540fb38d839Srin 	IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);
2541d0d8f2a5Srin }
2542d0d8f2a5Srin 
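/*
 * Refresh link state from the MAC: fetch speed/duplex when the link
 * comes up and report transitions via if_link_state_change().
 */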
2543fb38d839Srin static void
2544d0d8f2a5Srin igc_update_link_status(struct igc_softc *sc)
2545d0d8f2a5Srin {
2546fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2547d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
2548d0d8f2a5Srin 
2549ebb0ccdbSmsaitoh 	if (hw->mac.get_link_status)
2550ebb0ccdbSmsaitoh 		igc_check_for_link(hw);
2551ebb0ccdbSmsaitoh 
2552d0d8f2a5Srin 	if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) {
2553d0d8f2a5Srin 		if (sc->link_active == 0) {
2554d0d8f2a5Srin 			igc_get_speed_and_duplex(hw, &sc->link_speed,
2555d0d8f2a5Srin 			    &sc->link_duplex);
2556d0d8f2a5Srin 			sc->link_active = 1;
2557d0d8f2a5Srin 			ifp->if_baudrate = IF_Mbps(sc->link_speed);
2558fb38d839Srin 			if_link_state_change(ifp, LINK_STATE_UP);
2559d0d8f2a5Srin 		}
2560d0d8f2a5Srin 	} else {
2561d0d8f2a5Srin 		if (sc->link_active == 1) {
2562d0d8f2a5Srin 			ifp->if_baudrate = sc->link_speed = 0;
2563d0d8f2a5Srin 			sc->link_duplex = 0;
2564d0d8f2a5Srin 			sc->link_active = 0;
2565fb38d839Srin 			if_link_state_change(ifp, LINK_STATE_DOWN);
2566d0d8f2a5Srin 		}
2567d0d8f2a5Srin 	}
2568d0d8f2a5Srin }
2569d0d8f2a5Srin 
2570d0d8f2a5Srin /*********************************************************************
2571d0d8f2a5Srin  *
2572d0d8f2a5Srin  *  Get a buffer from system mbuf buffer pool.
2573d0d8f2a5Srin  *
2574d0d8f2a5Srin  **********************************************************************/
2575fb38d839Srin static int
2576fb38d839Srin igc_get_buf(struct rx_ring *rxr, int id, bool strict)
2577d0d8f2a5Srin {
2578d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
2579fb38d839Srin 	struct igc_queue *q = rxr->rxr_igcq;
2580fb38d839Srin 	struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
2581fb38d839Srin 	bus_dmamap_t map = rxbuf->map;
2582d0d8f2a5Srin 	struct mbuf *m;
2583d0d8f2a5Srin 	int error;
2584d0d8f2a5Srin 
2585fb38d839Srin 	if (__predict_false(rxbuf->buf)) {
2586fb38d839Srin 		if (strict) {
2587fb38d839Srin 			DPRINTF(RX, "slot %d already has an mbuf\n", id);
2588fb38d839Srin 			return EINVAL;
2589fb38d839Srin 		}
2590fb38d839Srin 		return 0;
2591fb38d839Srin 	}
2592fb38d839Srin 
2593fb38d839Srin 	MGETHDR(m, M_DONTWAIT, MT_DATA);
2594fb38d839Srin 	if (__predict_false(m == NULL)) {
2595fb38d839Srin  enobuf:
2596fb38d839Srin 		IGC_QUEUE_EVENT(q, rx_no_mbuf, 1);
2597d0d8f2a5Srin 		return ENOBUFS;
2598d0d8f2a5Srin 	}
2599*972ad69cSmlelstv 	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
2600d0d8f2a5Srin 
2601fb38d839Srin 	MCLGET(m, M_DONTWAIT);
2602fb38d839Srin 	if (__predict_false(!(m->m_flags & M_EXT))) {
2603fb38d839Srin 		m_freem(m);
2604fb38d839Srin 		goto enobuf;
2605fb38d839Srin 	}
2606d0d8f2a5Srin 
2607d0d8f2a5Srin 	m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz;
2608d0d8f2a5Srin 
2609fb38d839Srin 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, map, m,
2610fb38d839Srin 	    BUS_DMA_READ | BUS_DMA_NOWAIT);
2611d0d8f2a5Srin 	if (error) {
2612d0d8f2a5Srin 		m_freem(m);
2613d0d8f2a5Srin 		return error;
2614d0d8f2a5Srin 	}
2615d0d8f2a5Srin 
2616fb38d839Srin 	bus_dmamap_sync(rxr->rxdma.dma_tag, map, 0,
2617fb38d839Srin 	    map->dm_mapsize, BUS_DMASYNC_PREREAD);
2618d0d8f2a5Srin 	rxbuf->buf = m;
2619d0d8f2a5Srin 
2620fb38d839Srin 	union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
2621fb38d839Srin 	igc_rxdesc_sync(rxr, id, BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2622fb38d839Srin 	rxdesc->read.pkt_addr = htole64(map->dm_segs[0].ds_addr);
2623fb38d839Srin 	igc_rxdesc_sync(rxr, id, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
2624d0d8f2a5Srin 
2625d0d8f2a5Srin 	return 0;
2626d0d8f2a5Srin }
2627d0d8f2a5Srin 
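/*
 * MSI-X plumbing: enable multi-vector mode in GPIE, program a default
 * moderation rate into each queue's EITR, route RX/TX events to their
 * vectors through the IVAR registers, and route link events to
 * linkvec via IVAR_MISC.
 */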
2628fb38d839Srin static void
2629d0d8f2a5Srin igc_configure_queues(struct igc_softc *sc)
2630d0d8f2a5Srin {
2631d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
2632fb38d839Srin 	uint32_t ivar;
2633d0d8f2a5Srin 
2634d0d8f2a5Srin 	/* First turn on RSS capability */
2635d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME |
2636d0d8f2a5Srin 	    IGC_GPIE_PBA | IGC_GPIE_NSICR);
2637d0d8f2a5Srin 
2638d0d8f2a5Srin 	/* Set the starting interrupt rate */
2639fb38d839Srin 	uint32_t newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;
2640d0d8f2a5Srin 	newitr |= IGC_EITR_CNT_IGNR;
2641d0d8f2a5Srin 
2642d0d8f2a5Srin 	/* Turn on MSI-X */
2643fb38d839Srin 	uint32_t newmask = 0;
2644fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2645fb38d839Srin 		struct igc_queue *q = &sc->queues[iq];
2646fb38d839Srin 
2647d0d8f2a5Srin 		/* RX entries */
2648fb38d839Srin 		igc_set_queues(sc, iq, q->msix, 0);
2649d0d8f2a5Srin 		/* TX entries */
2650fb38d839Srin 		igc_set_queues(sc, iq, q->msix, 1);
2651fb38d839Srin 		newmask |= q->eims;
2652fb38d839Srin 		IGC_WRITE_REG(hw, IGC_EITR(q->msix), newitr);
2653d0d8f2a5Srin 	}
2654fb38d839Srin 	sc->msix_queuesmask = newmask;
2655fb38d839Srin 
2656fb38d839Srin #if 1
2657fb38d839Srin 	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, 0);
2658fb38d839Srin 	DPRINTF(CFG, "ivar(0)=0x%x\n", ivar);
2659fb38d839Srin 	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, 1);
2660fb38d839Srin 	DPRINTF(CFG, "ivar(1)=0x%x\n", ivar);
2661fb38d839Srin #endif
2662d0d8f2a5Srin 
2663d0d8f2a5Srin 	/* And for the link interrupt */
2664d0d8f2a5Srin 	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
2665d0d8f2a5Srin 	sc->msix_linkmask = 1 << sc->linkvec;
2666d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
2667d0d8f2a5Srin }
2668d0d8f2a5Srin 
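/*
 * Each IVAR register holds the vector assignments for two queues, one
 * byte lane per (queue, direction) as encoded below:
 *
 *	bits  7:0	RX of the even entry
 *	bits 15:8	TX of the even entry
 *	bits 23:16	RX of the odd entry
 *	bits 31:24	TX of the odd entry
 *
 * E.g. with vectors 1 and 2 for queues 0 and 1, and assuming
 * IGC_IVAR_VALID is 0x80 as in the sibling igb(4) driver, index 0 of
 * IVAR0 would read back as 0x82828181.
 */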
2669fb38d839Srin static void
2670d0d8f2a5Srin igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type)
2671d0d8f2a5Srin {
2672d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
2673fb38d839Srin 	const uint32_t index = entry >> 1;
2674fb38d839Srin 	uint32_t ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
2675d0d8f2a5Srin 
2676d0d8f2a5Srin 	if (type) {
2677d0d8f2a5Srin 		if (entry & 1) {
2678d0d8f2a5Srin 			ivar &= 0x00FFFFFF;
2679d0d8f2a5Srin 			ivar |= (vector | IGC_IVAR_VALID) << 24;
2680d0d8f2a5Srin 		} else {
2681d0d8f2a5Srin 			ivar &= 0xFFFF00FF;
2682d0d8f2a5Srin 			ivar |= (vector | IGC_IVAR_VALID) << 8;
2683d0d8f2a5Srin 		}
2684d0d8f2a5Srin 	} else {
2685d0d8f2a5Srin 		if (entry & 1) {
2686d0d8f2a5Srin 			ivar &= 0xFF00FFFF;
2687d0d8f2a5Srin 			ivar |= (vector | IGC_IVAR_VALID) << 16;
2688d0d8f2a5Srin 		} else {
2689d0d8f2a5Srin 			ivar &= 0xFFFFFF00;
2690d0d8f2a5Srin 			ivar |= vector | IGC_IVAR_VALID;
2691d0d8f2a5Srin 		}
2692d0d8f2a5Srin 	}
2693d0d8f2a5Srin 	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
2694d0d8f2a5Srin }
2695d0d8f2a5Srin 
2696fb38d839Srin static void
2697d0d8f2a5Srin igc_enable_queue(struct igc_softc *sc, uint32_t eims)
2698d0d8f2a5Srin {
2699d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims);
2700d0d8f2a5Srin }
2701d0d8f2a5Srin 
2702fb38d839Srin static void
2703d0d8f2a5Srin igc_enable_intr(struct igc_softc *sc)
2704d0d8f2a5Srin {
2705d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
2706d0d8f2a5Srin 
2707fb38d839Srin 	if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX) {
2708fb38d839Srin 		const uint32_t mask = sc->msix_queuesmask | sc->msix_linkmask;
2709fb38d839Srin 
2710d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_EIAC, mask);
2711d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_EIAM, mask);
2712d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_EIMS, mask);
2713d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
2714fb38d839Srin 	} else {
2715fb38d839Srin 		IGC_WRITE_REG(hw, IGC_IMS, IMS_ENABLE_MASK);
2716fb38d839Srin 	}
2717d0d8f2a5Srin 	IGC_WRITE_FLUSH(hw);
2718d0d8f2a5Srin }
2719d0d8f2a5Srin 
2720fb38d839Srin static void
2721d0d8f2a5Srin igc_disable_intr(struct igc_softc *sc)
2722d0d8f2a5Srin {
2723d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
2724d0d8f2a5Srin 
2725fb38d839Srin 	if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX) {
2726d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
2727d0d8f2a5Srin 		IGC_WRITE_REG(hw, IGC_EIAC, 0);
2728fb38d839Srin 	}
2729d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
2730d0d8f2a5Srin 	IGC_WRITE_FLUSH(hw);
2731d0d8f2a5Srin }
2732d0d8f2a5Srin 
2733fb38d839Srin static int
2734d0d8f2a5Srin igc_intr_link(void *arg)
2735d0d8f2a5Srin {
2736d0d8f2a5Srin 	struct igc_softc *sc = (struct igc_softc *)arg;
2737fb38d839Srin 	const uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);
2738fb38d839Srin 
2739fb38d839Srin 	IGC_GLOBAL_EVENT(sc, link, 1);
2740d0d8f2a5Srin 
2741d0d8f2a5Srin 	if (reg_icr & IGC_ICR_LSC) {
2742fb38d839Srin 		mutex_enter(&sc->sc_core_lock);
2743d0d8f2a5Srin 		sc->hw.mac.get_link_status = true;
2744d0d8f2a5Srin 		igc_update_link_status(sc);
2745fb38d839Srin 		mutex_exit(&sc->sc_core_lock);
2746d0d8f2a5Srin 	}
2747d0d8f2a5Srin 
2748d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
2749d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask);
2750d0d8f2a5Srin 
2751d0d8f2a5Srin 	return 1;
2752d0d8f2a5Srin }
2753d0d8f2a5Srin 
2754fb38d839Srin static int
2755d0d8f2a5Srin igc_intr_queue(void *arg)
2756d0d8f2a5Srin {
2757d0d8f2a5Srin 	struct igc_queue *iq = arg;
2758d0d8f2a5Srin 	struct igc_softc *sc = iq->sc;
2759fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2760d0d8f2a5Srin 	struct rx_ring *rxr = iq->rxr;
2761d0d8f2a5Srin 	struct tx_ring *txr = iq->txr;
2762fb38d839Srin 	const u_int txlimit = sc->sc_tx_intr_process_limit,
2763fb38d839Srin 		    rxlimit = sc->sc_rx_intr_process_limit;
2764fb38d839Srin 	bool txmore, rxmore;
2765d0d8f2a5Srin 
2766fb38d839Srin 	IGC_QUEUE_EVENT(iq, irqs, 1);
2767fb38d839Srin 
2768fb38d839Srin 	if (__predict_false(!ISSET(ifp->if_flags, IFF_RUNNING)))
2769fb38d839Srin 		return 0;
2770fb38d839Srin 
2771fb38d839Srin 	mutex_enter(&txr->txr_lock);
2772fb38d839Srin 	txmore = igc_txeof(txr, txlimit);
2773fb38d839Srin 	mutex_exit(&txr->txr_lock);
2774fb38d839Srin 	mutex_enter(&rxr->rxr_lock);
2775fb38d839Srin 	rxmore = igc_rxeof(rxr, rxlimit);
2776fb38d839Srin 	mutex_exit(&rxr->rxr_lock);
2777fb38d839Srin 
2778fb38d839Srin 	if (txmore || rxmore) {
2779fb38d839Srin 		IGC_QUEUE_EVENT(iq, req, 1);
2780fb38d839Srin 		igc_sched_handle_queue(sc, iq);
2781fb38d839Srin 	} else {
2782fb38d839Srin 		igc_enable_queue(sc, iq->eims);
2783d0d8f2a5Srin 	}
2784d0d8f2a5Srin 
2785fb38d839Srin 	return 1;
2786fb38d839Srin }
2787fb38d839Srin 
2788fb38d839Srin static int
2789fb38d839Srin igc_intr(void *arg)
2790fb38d839Srin {
2791fb38d839Srin 	struct igc_softc *sc = arg;
2792fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
2793fb38d839Srin 	struct igc_queue *iq = &sc->queues[0];
2794fb38d839Srin 	struct rx_ring *rxr = iq->rxr;
2795fb38d839Srin 	struct tx_ring *txr = iq->txr;
2796fb38d839Srin 	const u_int txlimit = sc->sc_tx_intr_process_limit,
2797fb38d839Srin 		    rxlimit = sc->sc_rx_intr_process_limit;
2798fb38d839Srin 	bool txmore, rxmore;
2799fb38d839Srin 
2800fb38d839Srin 	if (__predict_false(!ISSET(ifp->if_flags, IFF_RUNNING)))
2801fb38d839Srin 		return 0;
2802fb38d839Srin 
2803fb38d839Srin 	const uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);
2804fb38d839Srin 	DPRINTF(MISC, "reg_icr=0x%x\n", reg_icr);
2805fb38d839Srin 
2806fb38d839Srin 	/* Definitely not our interrupt. */
2807fb38d839Srin 	if (reg_icr == 0x0) {
280834ec33ebSrin 		DPRINTF(MISC, "not for me\n");
2809fb38d839Srin 		return 0;
2810fb38d839Srin 	}
2811fb38d839Srin 
2812fb38d839Srin 	IGC_QUEUE_EVENT(iq, irqs, 1);
2813fb38d839Srin 
2814fb38d839Srin 	/* Hot eject? */
2815fb38d839Srin 	if (__predict_false(reg_icr == 0xffffffff)) {
2816fb38d839Srin 		DPRINTF(MISC, "hot eject\n");
2817fb38d839Srin 		return 0;
2818fb38d839Srin 	}
2819fb38d839Srin 
2820fb38d839Srin 	if (__predict_false(!(reg_icr & IGC_ICR_INT_ASSERTED))) {
2821fb38d839Srin 		DPRINTF(MISC, "IGC_ICR_INT_ASSERTED not set\n");
2822fb38d839Srin 		return 0;
2823fb38d839Srin 	}
2824fb38d839Srin 
2825fb38d839Srin 	/*
2826fb38d839Srin 	 * Only MSI-X interrupts have one-shot behavior by taking advantage
2827fb38d839Srin 	 * of the EIAC register.  Thus, explicitly disable interrupts.  This
2828fb38d839Srin 	 * also works around the MSI message reordering errata on certain
2829fb38d839Srin 	 * systems.
2830fb38d839Srin 	 */
2831fb38d839Srin 	igc_disable_intr(sc);
2832fb38d839Srin 
2833fb38d839Srin 	mutex_enter(&txr->txr_lock);
2834fb38d839Srin 	txmore = igc_txeof(txr, txlimit);
2835fb38d839Srin 	mutex_exit(&txr->txr_lock);
2836fb38d839Srin 	mutex_enter(&rxr->rxr_lock);
2837fb38d839Srin 	rxmore = igc_rxeof(rxr, rxlimit);
2838fb38d839Srin 	mutex_exit(&rxr->rxr_lock);
2839fb38d839Srin 
2840fb38d839Srin 	/* Link status change */
2841fb38d839Srin 	/* XXXX FreeBSD checks IGC_ICR_RXSEQ */
2842fb38d839Srin 	if (__predict_false(reg_icr & IGC_ICR_LSC)) {
2843fb38d839Srin 		IGC_GLOBAL_EVENT(sc, link, 1);
2844fb38d839Srin 		mutex_enter(&sc->sc_core_lock);
2845fb38d839Srin 		sc->hw.mac.get_link_status = true;
2846fb38d839Srin 		igc_update_link_status(sc);
2847fb38d839Srin 		mutex_exit(&sc->sc_core_lock);
2848fb38d839Srin 	}
2849fb38d839Srin 
2850fb38d839Srin 	if (txmore || rxmore) {
2851fb38d839Srin 		IGC_QUEUE_EVENT(iq, req, 1);
2852fb38d839Srin 		igc_sched_handle_queue(sc, iq);
2853fb38d839Srin 	} else {
2854fb38d839Srin 		igc_enable_intr(sc);
2855fb38d839Srin 	}
2856d0d8f2a5Srin 
2857d0d8f2a5Srin 	return 1;
2858d0d8f2a5Srin }
2859d0d8f2a5Srin 
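/*
 * Deferred (softint or workqueue) half of the interrupt handlers above.
 * This path runs with the larger sc_{tx,rx}_process_limit budgets; any
 * work still left over is rescheduled instead of looping here.
 */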
2860fb38d839Srin static void
2861fb38d839Srin igc_handle_queue(void *arg)
2862fb38d839Srin {
2863fb38d839Srin 	struct igc_queue *iq = arg;
2864fb38d839Srin 	struct igc_softc *sc = iq->sc;
2865fb38d839Srin 	struct tx_ring *txr = iq->txr;
2866fb38d839Srin 	struct rx_ring *rxr = iq->rxr;
2867fb38d839Srin 	const u_int txlimit = sc->sc_tx_process_limit,
2868fb38d839Srin 		    rxlimit = sc->sc_rx_process_limit;
2869fb38d839Srin 	bool txmore, rxmore;
2870fb38d839Srin 
2871fb38d839Srin 	IGC_QUEUE_EVENT(iq, handleq, 1);
2872fb38d839Srin 
2873fb38d839Srin 	mutex_enter(&txr->txr_lock);
2874fb38d839Srin 	txmore = igc_txeof(txr, txlimit);
2875fb38d839Srin 	/* for ALTQ, dequeue from if_snd */
2876fb38d839Srin 	if (txr->me == 0) {
2877fb38d839Srin 		struct ifnet *ifp = &sc->sc_ec.ec_if;
2878fb38d839Srin 
2879fb38d839Srin 		igc_tx_common_locked(ifp, txr, IGC_TX_START);
2880fb38d839Srin 	}
2881fb38d839Srin 	mutex_exit(&txr->txr_lock);
2882fb38d839Srin 
2883fb38d839Srin 	mutex_enter(&rxr->rxr_lock);
2884fb38d839Srin 	rxmore = igc_rxeof(rxr, rxlimit);
2885fb38d839Srin 	mutex_exit(&rxr->rxr_lock);
2886fb38d839Srin 
2887fb38d839Srin 	if (txmore || rxmore) {
2888fb38d839Srin 		igc_sched_handle_queue(sc, iq);
2889fb38d839Srin 	} else {
2890fb38d839Srin 		if (sc->sc_intr_type == PCI_INTR_TYPE_MSIX)
2891fb38d839Srin 			igc_enable_queue(sc, iq->eims);
2892fb38d839Srin 		else
2893fb38d839Srin 			igc_enable_intr(sc);
2894fb38d839Srin 	}
2895fb38d839Srin }
2896fb38d839Srin 
2897fb38d839Srin static void
2898fb38d839Srin igc_handle_queue_work(struct work *wk, void *context)
2899fb38d839Srin {
2900fb38d839Srin 	struct igc_queue *iq =
2901fb38d839Srin 	    container_of(wk, struct igc_queue, igcq_wq_cookie);
2902fb38d839Srin 
2903fb38d839Srin 	igc_handle_queue(iq);
2904fb38d839Srin }
2905fb38d839Srin 
2906fb38d839Srin static void
2907fb38d839Srin igc_sched_handle_queue(struct igc_softc *sc, struct igc_queue *iq)
2908fb38d839Srin {
2909fb38d839Srin 
2910fb38d839Srin 	if (iq->igcq_workqueue) {
2911fb38d839Srin 		/* XXXRO notyet */
2912fb38d839Srin 		workqueue_enqueue(sc->sc_queue_wq, &iq->igcq_wq_cookie,
2913fb38d839Srin 		    curcpu());
2914fb38d839Srin 	} else {
2915fb38d839Srin 		softint_schedule(iq->igcq_si);
2916fb38d839Srin 	}
2917fb38d839Srin }
2918fb38d839Srin 
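/*
 * Wait for all handlers scheduled via igc_sched_handle_queue() to
 * drain: per-queue workqueue_wait() for the workqueue backend, or a
 * cross-call barrier for softints, which run to completion per CPU.
 */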
2919fb38d839Srin static void
2920fb38d839Srin igc_barrier_handle_queue(struct igc_softc *sc)
2921fb38d839Srin {
2922fb38d839Srin 
2923fb38d839Srin 	if (sc->sc_txrx_workqueue) {
2924fb38d839Srin 		for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2925fb38d839Srin 			struct igc_queue *q = &sc->queues[iq];
2926fb38d839Srin 
2927fb38d839Srin 			workqueue_wait(sc->sc_queue_wq, &q->igcq_wq_cookie);
2928fb38d839Srin 		}
2929fb38d839Srin 	} else {
2930fb38d839Srin 		xc_barrier(0);
2931fb38d839Srin 	}
2932fb38d839Srin }
2933fb38d839Srin 
2934d0d8f2a5Srin /*********************************************************************
2935d0d8f2a5Srin  *
2936d0d8f2a5Srin  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2937d0d8f2a5Srin  *  the information needed to transmit a packet on the wire.
2938d0d8f2a5Srin  *
2939d0d8f2a5Srin  **********************************************************************/
2940fb38d839Srin static int
2941d0d8f2a5Srin igc_allocate_transmit_buffers(struct tx_ring *txr)
2942d0d8f2a5Srin {
2943d0d8f2a5Srin 	struct igc_softc *sc = txr->sc;
2944fb38d839Srin 	int error;
2945d0d8f2a5Srin 
2946fb38d839Srin 	txr->tx_buffers =
2947fb38d839Srin 	    kmem_zalloc(sc->num_tx_desc * sizeof(struct igc_tx_buf), KM_SLEEP);
2948d0d8f2a5Srin 	txr->txtag = txr->txdma.dma_tag;
2949d0d8f2a5Srin 
2950d0d8f2a5Srin 	/* Create the descriptor buffer dma maps. */
2951fb38d839Srin 	for (int id = 0; id < sc->num_tx_desc; id++) {
2952fb38d839Srin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
2953fb38d839Srin 
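		/*
		 * One map per slot, sized for a maximal TSO frame:
		 * up to IGC_MAX_SCATTER segments of at most PAGE_SIZE
		 * bytes each and no boundary constraint.
		 */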
2954fb38d839Srin 		error = bus_dmamap_create(txr->txdma.dma_tag,
2955fb38d839Srin 		    round_page(IGC_TSO_SIZE + sizeof(struct ether_vlan_header)),
2956d0d8f2a5Srin 		    IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map);
2957d0d8f2a5Srin 		if (error != 0) {
2958fb38d839Srin 			aprint_error_dev(sc->sc_dev,
2959fb38d839Srin 			    "unable to create TX DMA map\n");
2960d0d8f2a5Srin 			goto fail;
2961d0d8f2a5Srin 		}
2962fb38d839Srin 
2963fb38d839Srin 		txbuf->eop_index = -1;
2964d0d8f2a5Srin 	}
2965d0d8f2a5Srin 
2966d0d8f2a5Srin 	return 0;
2967d0d8f2a5Srin  fail:
2968d0d8f2a5Srin 	return error;
2969d0d8f2a5Srin }
2970d0d8f2a5Srin 
2971d0d8f2a5Srin 
2972d0d8f2a5Srin /*********************************************************************
2973d0d8f2a5Srin  *
2974d0d8f2a5Srin  *  Allocate and initialize transmit structures.
2975d0d8f2a5Srin  *
2976d0d8f2a5Srin  **********************************************************************/
2977fb38d839Srin static int
2978d0d8f2a5Srin igc_setup_transmit_structures(struct igc_softc *sc)
2979d0d8f2a5Srin {
2980d0d8f2a5Srin 
2981fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
2982fb38d839Srin 		struct tx_ring *txr = &sc->tx_rings[iq];
2983fb38d839Srin 
2984d0d8f2a5Srin 		if (igc_setup_transmit_ring(txr))
2985d0d8f2a5Srin 			goto fail;
2986d0d8f2a5Srin 	}
2987d0d8f2a5Srin 
2988d0d8f2a5Srin 	return 0;
2989d0d8f2a5Srin  fail:
2990d0d8f2a5Srin 	igc_free_transmit_structures(sc);
2991d0d8f2a5Srin 	return ENOBUFS;
2992d0d8f2a5Srin }
2993d0d8f2a5Srin 
2994d0d8f2a5Srin /*********************************************************************
2995d0d8f2a5Srin  *
2996d0d8f2a5Srin  *  Initialize a transmit ring.
2997d0d8f2a5Srin  *
2998d0d8f2a5Srin  **********************************************************************/
2999fb38d839Srin static int
3000d0d8f2a5Srin igc_setup_transmit_ring(struct tx_ring *txr)
3001d0d8f2a5Srin {
3002d0d8f2a5Srin 	struct igc_softc *sc = txr->sc;
3003d0d8f2a5Srin 
3004d0d8f2a5Srin 	/* Now allocate transmit buffers for the ring. */
3005d0d8f2a5Srin 	if (igc_allocate_transmit_buffers(txr))
3006d0d8f2a5Srin 		return ENOMEM;
3007d0d8f2a5Srin 
3008d0d8f2a5Srin 	/* Clear the old ring contents */
3009fb38d839Srin 	memset(txr->tx_base, 0,
3010fb38d839Srin 	    sizeof(union igc_adv_tx_desc) * sc->num_tx_desc);
3011d0d8f2a5Srin 
3012d0d8f2a5Srin 	/* Reset indices. */
3013d0d8f2a5Srin 	txr->next_avail_desc = 0;
3014d0d8f2a5Srin 	txr->next_to_clean = 0;
3015d0d8f2a5Srin 
3016d0d8f2a5Srin 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
3017d0d8f2a5Srin 	    txr->txdma.dma_map->dm_mapsize,
3018d0d8f2a5Srin 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3019d0d8f2a5Srin 
3020fb38d839Srin 	txr->txr_interq = pcq_create(sc->num_tx_desc, KM_SLEEP);
3021fb38d839Srin 
3022fb38d839Srin 	mutex_init(&txr->txr_lock, MUTEX_DEFAULT, IPL_NET);
3023fb38d839Srin 
3024d0d8f2a5Srin 	return 0;
3025d0d8f2a5Srin }
3026d0d8f2a5Srin 
3027d0d8f2a5Srin /*********************************************************************
3028d0d8f2a5Srin  *
3029d0d8f2a5Srin  *  Enable transmit unit.
3030d0d8f2a5Srin  *
3031d0d8f2a5Srin  **********************************************************************/
3032fb38d839Srin static void
3033d0d8f2a5Srin igc_initialize_transmit_unit(struct igc_softc *sc)
3034d0d8f2a5Srin {
3035fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
3036d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
3037d0d8f2a5Srin 
3038d0d8f2a5Srin 	/* Setup the Base and Length of the TX descriptor ring. */
3039fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3040fb38d839Srin 		struct tx_ring *txr = &sc->tx_rings[iq];
3041fb38d839Srin 		const uint64_t bus_addr =
3042fb38d839Srin 		    txr->txdma.dma_map->dm_segs[0].ds_addr;
3043d0d8f2a5Srin 
3044d0d8f2a5Srin 		/* Base and len of TX ring */
3045fb38d839Srin 		IGC_WRITE_REG(hw, IGC_TDLEN(iq),
3046d0d8f2a5Srin 		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
3047fb38d839Srin 		IGC_WRITE_REG(hw, IGC_TDBAH(iq), (uint32_t)(bus_addr >> 32));
3048fb38d839Srin 		IGC_WRITE_REG(hw, IGC_TDBAL(iq), (uint32_t)bus_addr);
3049d0d8f2a5Srin 
3050d0d8f2a5Srin 		/* Init the HEAD/TAIL indices */
3051fb38d839Srin 		IGC_WRITE_REG(hw, IGC_TDT(iq), 0 /* XXX txr->next_avail_desc */);
3052fb38d839Srin 		IGC_WRITE_REG(hw, IGC_TDH(iq), 0);
3053d0d8f2a5Srin 
3054d0d8f2a5Srin 		txr->watchdog_timer = 0;
3055d0d8f2a5Srin 
3056fb38d839Srin 		uint32_t txdctl = 0;	/* Clear txdctl */
3057d0d8f2a5Srin 		txdctl |= 0x1f;		/* PTHRESH */
3058d0d8f2a5Srin 		txdctl |= 1 << 8;	/* HTHRESH */
3059d0d8f2a5Srin 		txdctl |= 1 << 16;	/* WTHRESH */
3060d0d8f2a5Srin 		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
3061d0d8f2a5Srin 		txdctl |= IGC_TXDCTL_GRAN;
3062d0d8f2a5Srin 		txdctl |= 1 << 25;	/* LWTHRESH */
3063d0d8f2a5Srin 
3064fb38d839Srin 		IGC_WRITE_REG(hw, IGC_TXDCTL(iq), txdctl);
3065d0d8f2a5Srin 	}
3066d0d8f2a5Srin 	ifp->if_timer = 0;
3067d0d8f2a5Srin 
3068d0d8f2a5Srin 	/* Program the Transmit Control Register */
3069fb38d839Srin 	uint32_t tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
3070d0d8f2a5Srin 	tctl &= ~IGC_TCTL_CT;
3071d0d8f2a5Srin 	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
3072d0d8f2a5Srin 	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
3073d0d8f2a5Srin 
3074d0d8f2a5Srin 	/* This write will effectively turn on the transmit unit. */
3075d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
3076d0d8f2a5Srin }
3077d0d8f2a5Srin 
3078d0d8f2a5Srin /*********************************************************************
3079d0d8f2a5Srin  *
3080d0d8f2a5Srin  *  Free all transmit rings.
3081d0d8f2a5Srin  *
3082d0d8f2a5Srin  **********************************************************************/
3083fb38d839Srin static void
3084d0d8f2a5Srin igc_free_transmit_structures(struct igc_softc *sc)
3085d0d8f2a5Srin {
3086d0d8f2a5Srin 
3087fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3088fb38d839Srin 		struct tx_ring *txr = &sc->tx_rings[iq];
3089fb38d839Srin 
3090d0d8f2a5Srin 		igc_free_transmit_buffers(txr);
3091d0d8f2a5Srin 	}
3092fb38d839Srin }
3093d0d8f2a5Srin 
3094d0d8f2a5Srin /*********************************************************************
3095d0d8f2a5Srin  *
3096d0d8f2a5Srin  *  Free transmit ring related data structures.
3097d0d8f2a5Srin  *
3098d0d8f2a5Srin  **********************************************************************/
3099fb38d839Srin static void
3100d0d8f2a5Srin igc_free_transmit_buffers(struct tx_ring *txr)
3101d0d8f2a5Srin {
3102d0d8f2a5Srin 	struct igc_softc *sc = txr->sc;
3103d0d8f2a5Srin 
3104d0d8f2a5Srin 	if (txr->tx_buffers == NULL)
3105d0d8f2a5Srin 		return;
3106d0d8f2a5Srin 
3107fb38d839Srin 	igc_withdraw_transmit_packets(txr, true);
3108fb38d839Srin 
3109fb38d839Srin 	kmem_free(txr->tx_buffers,
3110fb38d839Srin 	    sc->num_tx_desc * sizeof(struct igc_tx_buf));
3111fb38d839Srin 	txr->tx_buffers = NULL;
3112fb38d839Srin 	txr->txtag = NULL;
3113fb38d839Srin 
3114fb38d839Srin 	pcq_destroy(txr->txr_interq);
3115fb38d839Srin 	mutex_destroy(&txr->txr_lock);
3116fb38d839Srin }
3117fb38d839Srin 
3118fb38d839Srin /*********************************************************************
3119fb38d839Srin  *
3120fb38d839Srin  *  Withdraw transmit packets.
3121fb38d839Srin  *
3122fb38d839Srin  **********************************************************************/
3123fb38d839Srin static void
3124fb38d839Srin igc_withdraw_transmit_packets(struct tx_ring *txr, bool destroy)
3125fb38d839Srin {
3126fb38d839Srin 	struct igc_softc *sc = txr->sc;
3127fb38d839Srin 	struct igc_queue *q = txr->txr_igcq;
3128fb38d839Srin 
3129fb38d839Srin 	mutex_enter(&txr->txr_lock);
3130fb38d839Srin 
3131fb38d839Srin 	for (int id = 0; id < sc->num_tx_desc; id++) {
3132fb38d839Srin 		union igc_adv_tx_desc *txdesc = &txr->tx_base[id];
3133fb38d839Srin 
3134fb38d839Srin 		igc_txdesc_sync(txr, id,
3135fb38d839Srin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3136fb38d839Srin 		txdesc->read.buffer_addr = 0;
3137fb38d839Srin 		txdesc->read.cmd_type_len = 0;
3138fb38d839Srin 		txdesc->read.olinfo_status = 0;
3139fb38d839Srin 		igc_txdesc_sync(txr, id,
3140fb38d839Srin 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3141fb38d839Srin 
3142fb38d839Srin 		struct igc_tx_buf *txbuf = &txr->tx_buffers[id];
3143fb38d839Srin 		bus_dmamap_t map = txbuf->map;
3144fb38d839Srin 
3145fb38d839Srin 		if (map != NULL && map->dm_nsegs > 0) {
3146fb38d839Srin 			bus_dmamap_sync(txr->txdma.dma_tag, map,
3147fb38d839Srin 			    0, map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3148fb38d839Srin 			bus_dmamap_unload(txr->txdma.dma_tag, map);
3149d0d8f2a5Srin 		}
3150d0d8f2a5Srin 		m_freem(txbuf->m_head);
3151d0d8f2a5Srin 		txbuf->m_head = NULL;
3152fb38d839Srin 		if (map != NULL && destroy) {
3153fb38d839Srin 			bus_dmamap_destroy(txr->txdma.dma_tag, map);
3154d0d8f2a5Srin 			txbuf->map = NULL;
3155d0d8f2a5Srin 		}
3156fb38d839Srin 		txbuf->eop_index = -1;
3157fb38d839Srin 	}
3158fb38d839Srin 
3159fb38d839Srin 	txr->next_avail_desc = 0;
3160d0d8f2a5Srin 	txr->next_to_clean = 0;
3161d0d8f2a5Srin 
3162fb38d839Srin 	struct mbuf *m;
3163fb38d839Srin 	while ((m = pcq_get(txr->txr_interq)) != NULL) {
3164fb38d839Srin 		IGC_QUEUE_EVENT(q, tx_pcq_drop, 1);
3165fb38d839Srin 		m_freem(m);
3166fb38d839Srin 	}
3167fb38d839Srin 
3168fb38d839Srin 	mutex_exit(&txr->txr_lock);
3169d0d8f2a5Srin }
3170d0d8f2a5Srin 
3171d0d8f2a5Srin 
3172d0d8f2a5Srin /*********************************************************************
3173d0d8f2a5Srin  *
3174d0d8f2a5Srin  *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
3175d0d8f2a5Srin  *
3176d0d8f2a5Srin  **********************************************************************/
3177d0d8f2a5Srin 
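/*
 * Returns 1 when a context descriptor has been written at slot "prod"
 * (the caller must advance its producer index past it), 0 when none is
 * needed.  TSO frames are handed off to igc_tso_setup().
 */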
3178fb38d839Srin static int
3179d0d8f2a5Srin igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
3180fb38d839Srin     uint32_t *cmd_type_len, uint32_t *olinfo_status)
3181d0d8f2a5Srin {
3182fb38d839Srin 	struct ether_vlan_header *evl;
3183d0d8f2a5Srin 	uint32_t type_tucmd_mlhl = 0;
3184d0d8f2a5Srin 	uint32_t vlan_macip_lens = 0;
3185fb38d839Srin 	uint32_t ehlen, iphlen;
3186fb38d839Srin 	uint16_t ehtype;
3187d0d8f2a5Srin 	int off = 0;
3188d0d8f2a5Srin 
3189fb38d839Srin 	const int csum_flags = mp->m_pkthdr.csum_flags;
3190fb38d839Srin 
3191fb38d839Srin 	/* First check if TSO is to be used */
3192fb38d839Srin 	if ((csum_flags & (M_CSUM_TSOv4 | M_CSUM_TSOv6)) != 0) {
3193fb38d839Srin 		return igc_tso_setup(txr, mp, prod, cmd_type_len,
3194fb38d839Srin 		    olinfo_status);
3195fb38d839Srin 	}
3196fb38d839Srin 
3197fb38d839Srin 	const bool v4 = (csum_flags &
3198fb38d839Srin 	    (M_CSUM_IPv4 | M_CSUM_TCPv4 | M_CSUM_UDPv4)) != 0;
3199fb38d839Srin 	const bool v6 = (csum_flags & (M_CSUM_UDPv6 | M_CSUM_TCPv6)) != 0;
3200fb38d839Srin 
3201fb38d839Srin 	/* Indicate the whole packet as payload when not doing TSO */
3202fb38d839Srin 	*olinfo_status |= mp->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT;
3203d0d8f2a5Srin 
3204d0d8f2a5Srin 	/*
3205d0d8f2a5Srin 	 * In advanced descriptors the vlan tag must
3206d0d8f2a5Srin 	 * be placed into the context descriptor. Hence
3207d0d8f2a5Srin 	 * we need to make one even if not doing offloads.
3208d0d8f2a5Srin 	 */
3209d0d8f2a5Srin #if NVLAN > 0
3210fb38d839Srin 	if (vlan_has_tag(mp)) {
3211fb38d839Srin 		vlan_macip_lens |= (uint32_t)vlan_get_tag(mp)
3212fb38d839Srin 		    << IGC_ADVTXD_VLAN_SHIFT;
3213d0d8f2a5Srin 		off = 1;
3214fb38d839Srin 	} else
3215fb38d839Srin #endif
3216fb38d839Srin 	if (!v4 && !v6)
3217fb38d839Srin 		return 0;
3218fb38d839Srin 
3219fb38d839Srin 	KASSERT(mp->m_len >= sizeof(struct ether_header));
3220fb38d839Srin 	evl = mtod(mp, struct ether_vlan_header *);
3221fb38d839Srin 	if (evl->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3222fb38d839Srin 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
3223fb38d839Srin 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3224fb38d839Srin 		ehtype = evl->evl_proto;
3225fb38d839Srin 	} else {
3226fb38d839Srin 		ehlen = ETHER_HDR_LEN;
3227fb38d839Srin 		ehtype = evl->evl_encap_proto;
3228d0d8f2a5Srin 	}
3229fb38d839Srin 
3230fb38d839Srin 	vlan_macip_lens |= ehlen << IGC_ADVTXD_MACLEN_SHIFT;
3231fb38d839Srin 
3232fb38d839Srin #ifdef IGC_DEBUG
3233fb38d839Srin 	/*
3234fb38d839Srin 	 * For checksum offloading, L3 headers are not mandatory.
3235fb38d839Srin 	 * We use these only for consistency checks.
3236fb38d839Srin 	 */
3237fb38d839Srin 	struct ip *ip;
3238fb38d839Srin 	struct ip6_hdr *ip6;
3239fb38d839Srin 	uint8_t ipproto;
3240fb38d839Srin 	char *l3d;
3241fb38d839Srin 
3242fb38d839Srin 	if (mp->m_len == ehlen && mp->m_next != NULL)
3243fb38d839Srin 		l3d = mtod(mp->m_next, char *);
3244fb38d839Srin 	else
3245fb38d839Srin 		l3d = mtod(mp, char *) + ehlen;
3246d0d8f2a5Srin #endif
3247d0d8f2a5Srin 
3248fb38d839Srin 	switch (ntohs(ehtype)) {
3249fb38d839Srin 	case ETHERTYPE_IP:
3250fb38d839Srin 		iphlen = M_CSUM_DATA_IPv4_IPHL(mp->m_pkthdr.csum_data);
3251d0d8f2a5Srin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
3252fb38d839Srin 
3253fb38d839Srin 		if ((csum_flags & M_CSUM_IPv4) != 0) {
3254d0d8f2a5Srin 			*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
3255d0d8f2a5Srin 			off = 1;
3256d0d8f2a5Srin 		}
3257fb38d839Srin #ifdef IGC_DEBUG
3258fb38d839Srin 		KASSERT(!v6);
3259fb38d839Srin 		ip = (void *)l3d;
3260fb38d839Srin 		ipproto = ip->ip_p;
3261fb38d839Srin 		KASSERT(iphlen == ip->ip_hl << 2);
3262fb38d839Srin 		KASSERT((mp->m_pkthdr.csum_flags & M_CSUM_IPv4) == 0 ||
3263fb38d839Srin 		    ip->ip_sum == 0);
3264d0d8f2a5Srin #endif
3265fb38d839Srin 		break;
3266fb38d839Srin 	case ETHERTYPE_IPV6:
3267fb38d839Srin 		iphlen = M_CSUM_DATA_IPv6_IPHL(mp->m_pkthdr.csum_data);
3268fb38d839Srin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
3269fb38d839Srin #ifdef IGC_DEBUG
3270fb38d839Srin 		KASSERT(!v4);
3271fb38d839Srin 		ip6 = (void *)l3d;
3272fb38d839Srin 		ipproto = ip6->ip6_nxt;	/* XXX */
3273fb38d839Srin 		KASSERT(iphlen == sizeof(struct ip6_hdr));
3274fb38d839Srin #endif
3275fb38d839Srin 		break;
3276fb38d839Srin 	default:
3277fb38d839Srin 		/*
3278fb38d839Srin 		 * Unknown L3 protocol. Clear L3 header length and proceed for
3279fb38d839Srin 		 * Unknown L3 protocol.  Clear the L3 header length and
3280fb38d839Srin 		 * proceed, as the Linux driver does.
3281fb38d839Srin 		iphlen = 0;
3282fb38d839Srin #ifdef IGC_DEBUG
3283fb38d839Srin 		KASSERT(!v4 && !v6);
3284fb38d839Srin 		ipproto = 0;
3285fb38d839Srin #endif
3286fb38d839Srin 		break;
3287d0d8f2a5Srin 	}
3288d0d8f2a5Srin 
3289d0d8f2a5Srin 	vlan_macip_lens |= iphlen;
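	/*
	 * E.g. a plain IPv4/TCP frame (ehlen 14, iphlen 20) packs to
	 * (14 << 9) | 20 = 0x1c14 here, assuming IGC_ADVTXD_MACLEN_SHIFT
	 * is 9 as in the sibling igb(4) driver.
	 */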
3290d0d8f2a5Srin 
3291fb38d839Srin 	const bool tcp = (csum_flags & (M_CSUM_TCPv4 | M_CSUM_TCPv6)) != 0;
3292fb38d839Srin 	const bool udp = (csum_flags & (M_CSUM_UDPv4 | M_CSUM_UDPv6)) != 0;
3293fb38d839Srin 
3294fb38d839Srin 	if (tcp) {
3295fb38d839Srin #ifdef IGC_DEBUG
3296fb38d839Srin 		KASSERTMSG(ipproto == IPPROTO_TCP, "ipproto = %d", ipproto);
3297fb38d839Srin #endif
3298d0d8f2a5Srin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
3299d0d8f2a5Srin 		*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
3300d0d8f2a5Srin 		off = 1;
3301fb38d839Srin 	} else if (udp) {
3302fb38d839Srin #ifdef IGC_DEBUG
3303fb38d839Srin 		KASSERTMSG(ipproto == IPPROTO_UDP, "ipproto = %d", ipproto);
3304fb38d839Srin #endif
3305d0d8f2a5Srin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
3306d0d8f2a5Srin 		*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
3307d0d8f2a5Srin 		off = 1;
3308d0d8f2a5Srin 	}
3309d0d8f2a5Srin 
3310d0d8f2a5Srin 	if (off == 0)
3311d0d8f2a5Srin 		return 0;
3312d0d8f2a5Srin 
3313fb38d839Srin 	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
3314fb38d839Srin 
3315d0d8f2a5Srin 	/* Now ready a context descriptor */
3316fb38d839Srin 	struct igc_adv_tx_context_desc *txdesc =
3317fb38d839Srin 	    (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
3318d0d8f2a5Srin 
3319d0d8f2a5Srin 	/* Now copy bits into descriptor */
3320fb38d839Srin 	igc_txdesc_sync(txr, prod,
3321fb38d839Srin 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3322d0d8f2a5Srin 	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
3323d0d8f2a5Srin 	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
3324d0d8f2a5Srin 	htolem32(&txdesc->seqnum_seed, 0);
3325d0d8f2a5Srin 	htolem32(&txdesc->mss_l4len_idx, 0);
3326fb38d839Srin 	igc_txdesc_sync(txr, prod,
3327fb38d839Srin 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3328d0d8f2a5Srin 
3329d0d8f2a5Srin 	return 1;
3330d0d8f2a5Srin }
3331d0d8f2a5Srin 
3332d0d8f2a5Srin /*********************************************************************
3333d0d8f2a5Srin  *
3334fb38d839Srin  *  Advanced Context Descriptor setup for TSO
3335fb38d839Srin  *
3336fb38d839Srin  *  XXX XXXRO
3337fb38d839Srin  *	Not working yet: some packets are sent with correct csums,
3338fb38d839Srin  *	but others are not.  th->th_sum may need to be adjusted.
3339fb38d839Srin  *
3340fb38d839Srin  **********************************************************************/
3341fb38d839Srin 
3342fb38d839Srin static int
3343fb38d839Srin igc_tso_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
3344fb38d839Srin     uint32_t *cmd_type_len, uint32_t *olinfo_status)
3345fb38d839Srin {
3346fb38d839Srin #if 1 /* notyet */
3347fb38d839Srin 	return 0;
3348fb38d839Srin #else
3349fb38d839Srin 	struct ether_vlan_header *evl;
3350fb38d839Srin 	struct ip *ip;
3351fb38d839Srin 	struct ip6_hdr *ip6;
3352fb38d839Srin 	struct tcphdr *th;
3353fb38d839Srin 	uint32_t type_tucmd_mlhl = 0;
3354fb38d839Srin 	uint32_t vlan_macip_lens = 0;
3355fb38d839Srin 	uint32_t mss_l4len_idx = 0;
3356fb38d839Srin 	uint32_t ehlen, iphlen, tcphlen, paylen;
3357fb38d839Srin 	uint16_t ehtype;
3358fb38d839Srin 
3359fb38d839Srin 	/*
3360fb38d839Srin 	 * In advanced descriptors the vlan tag must
3361fb38d839Srin 	 * be placed into the context descriptor. Hence
3362fb38d839Srin 	 * we need to make one even if not doing offloads.
3363fb38d839Srin 	 */
3364fb38d839Srin #if NVLAN > 0
3365fb38d839Srin 	if (vlan_has_tag(mp)) {
3366fb38d839Srin 		vlan_macip_lens |= (uint32_t)vlan_get_tag(mp)
3367fb38d839Srin 		    << IGC_ADVTXD_VLAN_SHIFT;
3368fb38d839Srin 	}
3369fb38d839Srin #endif
3370fb38d839Srin 
3371fb38d839Srin 	KASSERT(mp->m_len >= sizeof(struct ether_header));
3372fb38d839Srin 	evl = mtod(mp, struct ether_vlan_header *);
3373fb38d839Srin 	if (evl->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
3374fb38d839Srin 		KASSERT(mp->m_len >= sizeof(struct ether_vlan_header));
3375fb38d839Srin 		ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
3376fb38d839Srin 		ehtype = evl->evl_proto;
3377fb38d839Srin 	} else {
3378fb38d839Srin 		ehlen = ETHER_HDR_LEN;
3379fb38d839Srin 		ehtype = evl->evl_encap_proto;
3380fb38d839Srin 	}
3381fb38d839Srin 
3382fb38d839Srin 	vlan_macip_lens |= ehlen << IGC_ADVTXD_MACLEN_SHIFT;
3383fb38d839Srin 
3384fb38d839Srin 	switch (ntohs(ehtype)) {
3385fb38d839Srin 	case ETHERTYPE_IP:
3386fb38d839Srin 		iphlen = M_CSUM_DATA_IPv4_IPHL(mp->m_pkthdr.csum_data);
3387fb38d839Srin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
3388fb38d839Srin 		*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
3389fb38d839Srin 
3390fb38d839Srin 		KASSERT(mp->m_len >= ehlen + sizeof(*ip));
3391fb38d839Srin 		ip = (void *)(mtod(mp, char *) + ehlen);
3392fb38d839Srin 		ip->ip_len = 0;
3393fb38d839Srin 		KASSERT(iphlen == ip->ip_hl << 2);
3394fb38d839Srin 		KASSERT(ip->ip_sum == 0);
3395fb38d839Srin 		KASSERT(ip->ip_p == IPPROTO_TCP);
3396fb38d839Srin 
3397fb38d839Srin 		KASSERT(mp->m_len >= ehlen + iphlen + sizeof(*th));
3398fb38d839Srin 		th = (void *)((char *)ip + iphlen);
3399fb38d839Srin 		th->th_sum = in_cksum_phdr(ip->ip_src.s_addr, ip->ip_dst.s_addr,
3400fb38d839Srin 		    htons(IPPROTO_TCP));
3401fb38d839Srin 		break;
3402fb38d839Srin 	case ETHERTYPE_IPV6:
3403fb38d839Srin 		iphlen = M_CSUM_DATA_IPv6_IPHL(mp->m_pkthdr.csum_data);
3404fb38d839Srin 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
3405fb38d839Srin 
3406fb38d839Srin 		KASSERT(mp->m_len >= ehlen + sizeof(*ip6));
3407fb38d839Srin 		ip6 = (void *)(mtod(mp, char *) + ehlen);
3408fb38d839Srin 		ip6->ip6_plen = 0;
3409fb38d839Srin 		KASSERT(iphlen == sizeof(struct ip6_hdr));
3410fb38d839Srin 		KASSERT(ip6->ip6_nxt == IPPROTO_TCP);
3411fb38d839Srin 
3412fb38d839Srin 		KASSERT(mp->m_len >= ehlen + iphlen + sizeof(*th));
3413fb38d839Srin 		th = (void *)((char *)ip6 + iphlen);
3414fb38d839Srin 		tcphlen = th->th_off << 2;
3415fb38d839Srin 		paylen = mp->m_pkthdr.len - ehlen - iphlen - tcphlen;
3416fb38d839Srin 		th->th_sum = in6_cksum_phdr(&ip6->ip6_src, &ip6->ip6_dst, 0,
3417fb38d839Srin 		    htonl(IPPROTO_TCP));
3418fb38d839Srin 		break;
3419fb38d839Srin 	default:
3420fb38d839Srin 		panic("%s", __func__);
3421fb38d839Srin 	}
3422fb38d839Srin 
3423fb38d839Srin 	tcphlen = th->th_off << 2;
3424fb38d839Srin 	paylen = mp->m_pkthdr.len - ehlen - iphlen - tcphlen;
3425fb38d839Srin 
3426fb38d839Srin 	vlan_macip_lens |= iphlen;
3427fb38d839Srin 
3428fb38d839Srin 	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
3429fb38d839Srin 	type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
3430fb38d839Srin 
3431fb38d839Srin 	mss_l4len_idx |= mp->m_pkthdr.segsz << IGC_ADVTXD_MSS_SHIFT;
3432fb38d839Srin 	mss_l4len_idx |= tcphlen << IGC_ADVTXD_L4LEN_SHIFT;
3433fb38d839Srin 
3434fb38d839Srin 	/* Now ready a context descriptor */
3435fb38d839Srin 	struct igc_adv_tx_context_desc *txdesc =
3436fb38d839Srin 	    (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
3437fb38d839Srin 
3438fb38d839Srin 	/* Now copy bits into descriptor */
3439fb38d839Srin 	igc_txdesc_sync(txr, prod,
3440fb38d839Srin 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3441fb38d839Srin 	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
3442fb38d839Srin 	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
3443fb38d839Srin 	htolem32(&txdesc->seqnum_seed, 0);
3444fb38d839Srin 	htolem32(&txdesc->mss_l4len_idx, mss_l4len_idx);
3445fb38d839Srin 	igc_txdesc_sync(txr, prod,
3446fb38d839Srin 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3447fb38d839Srin 
3448fb38d839Srin 	*cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
3449fb38d839Srin 	*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
3450fb38d839Srin 	*olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;
3451fb38d839Srin 
3452fb38d839Srin 	return 1;
3453fb38d839Srin #endif /* notyet */
3454fb38d839Srin }
3455fb38d839Srin 
3456fb38d839Srin /*********************************************************************
3457fb38d839Srin  *
3458d0d8f2a5Srin  *  Allocate memory for rx_buffer structures. Since we use one
3459d0d8f2a5Srin  *  rx_buffer per received packet, the maximum number of rx_buffers
3460d0d8f2a5Srin  *  that we'll need is equal to the number of receive descriptors
3461d0d8f2a5Srin  *  that we've allocated.
3462d0d8f2a5Srin  *
3463d0d8f2a5Srin  **********************************************************************/
3464fb38d839Srin static int
3465d0d8f2a5Srin igc_allocate_receive_buffers(struct rx_ring *rxr)
3466d0d8f2a5Srin {
3467d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
3468fb38d839Srin 	int error;
3469d0d8f2a5Srin 
3470fb38d839Srin 	rxr->rx_buffers =
3471fb38d839Srin 	    kmem_zalloc(sc->num_rx_desc * sizeof(struct igc_rx_buf), KM_SLEEP);
3472d0d8f2a5Srin 
3473fb38d839Srin 	for (int id = 0; id < sc->num_rx_desc; id++) {
3474fb38d839Srin 		struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
3475fb38d839Srin 
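		/*
		 * Each RX cluster is mapped as a single contiguous
		 * MCLBYTES segment, matching the one-buffer descriptor
		 * layout programmed in igc_initialize_receive_unit().
		 */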
3476fb38d839Srin 		error = bus_dmamap_create(rxr->rxdma.dma_tag, MCLBYTES, 1,
3477fb38d839Srin 		    MCLBYTES, 0, BUS_DMA_WAITOK, &rxbuf->map);
3478d0d8f2a5Srin 		if (error) {
3479fb38d839Srin 			aprint_error_dev(sc->sc_dev,
3480fb38d839Srin 			    "unable to create RX DMA map\n");
3481d0d8f2a5Srin 			goto fail;
3482d0d8f2a5Srin 		}
3483d0d8f2a5Srin 	}
3484d0d8f2a5Srin 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
3485d0d8f2a5Srin 	    rxr->rxdma.dma_map->dm_mapsize,
3486d0d8f2a5Srin 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3487d0d8f2a5Srin 
3488d0d8f2a5Srin 	return 0;
3489d0d8f2a5Srin  fail:
3490d0d8f2a5Srin 	return error;
3491d0d8f2a5Srin }
3492d0d8f2a5Srin 
3493d0d8f2a5Srin /*********************************************************************
3494d0d8f2a5Srin  *
3495d0d8f2a5Srin  *  Allocate and initialize receive structures.
3496d0d8f2a5Srin  *
3497d0d8f2a5Srin  **********************************************************************/
3498fb38d839Srin static int
3499d0d8f2a5Srin igc_setup_receive_structures(struct igc_softc *sc)
3500d0d8f2a5Srin {
3501d0d8f2a5Srin 
3502fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3503fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
3504fb38d839Srin 
3505d0d8f2a5Srin 		if (igc_setup_receive_ring(rxr))
3506d0d8f2a5Srin 			goto fail;
3507d0d8f2a5Srin 	}
3508d0d8f2a5Srin 
3509d0d8f2a5Srin 	return 0;
3510d0d8f2a5Srin  fail:
3511d0d8f2a5Srin 	igc_free_receive_structures(sc);
3512d0d8f2a5Srin 	return ENOBUFS;
3513d0d8f2a5Srin }
3514d0d8f2a5Srin 
3515d0d8f2a5Srin /*********************************************************************
3516d0d8f2a5Srin  *
3517d0d8f2a5Srin  *  Initialize a receive ring and its buffers.
3518d0d8f2a5Srin  *
3519d0d8f2a5Srin  **********************************************************************/
3520fb38d839Srin static int
3521d0d8f2a5Srin igc_setup_receive_ring(struct rx_ring *rxr)
3522d0d8f2a5Srin {
3523d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
3524fb38d839Srin 	const int rsize = roundup2(
3525fb38d839Srin 	    sc->num_rx_desc * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
3526d0d8f2a5Srin 
3527d0d8f2a5Srin 	/* Clear the ring contents. */
3528fb38d839Srin 	memset(rxr->rx_base, 0, rsize);
3529d0d8f2a5Srin 
3530d0d8f2a5Srin 	if (igc_allocate_receive_buffers(rxr))
3531d0d8f2a5Srin 		return ENOMEM;
3532d0d8f2a5Srin 
3533d0d8f2a5Srin 	/* Setup our descriptor indices. */
3534d0d8f2a5Srin 	rxr->next_to_check = 0;
3535fb38d839Srin 	rxr->last_desc_filled = 0;
3536d0d8f2a5Srin 
3537fb38d839Srin 	mutex_init(&rxr->rxr_lock, MUTEX_DEFAULT, IPL_NET);
3538d0d8f2a5Srin 
3539d0d8f2a5Srin 	return 0;
3540d0d8f2a5Srin }
3541d0d8f2a5Srin 
3542d0d8f2a5Srin /*********************************************************************
3543d0d8f2a5Srin  *
3544d0d8f2a5Srin  *  Enable receive unit.
3545d0d8f2a5Srin  *
3546d0d8f2a5Srin  **********************************************************************/
3547fb38d839Srin static void
3548d0d8f2a5Srin igc_initialize_receive_unit(struct igc_softc *sc)
3549d0d8f2a5Srin {
3550fb38d839Srin 	struct ifnet *ifp = &sc->sc_ec.ec_if;
3551d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
3552fb38d839Srin 	uint32_t rctl, rxcsum, srrctl;
3553fb38d839Srin 
3554fb38d839Srin 	DPRINTF(RX, "called\n");
3555d0d8f2a5Srin 
3556d0d8f2a5Srin 	/*
3557d0d8f2a5Srin 	 * Make sure receives are disabled while setting
3558d0d8f2a5Srin 	 * up the descriptor ring.
3559d0d8f2a5Srin 	 */
3560d0d8f2a5Srin 	rctl = IGC_READ_REG(hw, IGC_RCTL);
3561d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
3562d0d8f2a5Srin 
3563d0d8f2a5Srin 	/* Setup the Receive Control Register */
3564d0d8f2a5Srin 	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
3565d0d8f2a5Srin 	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
3566d0d8f2a5Srin 	    IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
3567d0d8f2a5Srin 
3568fb38d839Srin #if 1
3569d0d8f2a5Srin 	/* Do not store bad packets */
3570d0d8f2a5Srin 	rctl &= ~IGC_RCTL_SBP;
3571fb38d839Srin #else
3572fb38d839Srin 	/* for debug */
3573fb38d839Srin 	rctl |= IGC_RCTL_SBP;
3574fb38d839Srin #endif
3575d0d8f2a5Srin 
3576d0d8f2a5Srin 	/* Enable Long Packet receive */
3577fb38d839Srin 	if (sc->hw.mac.max_frame_size > ETHER_MAX_LEN)
3578d0d8f2a5Srin 		rctl |= IGC_RCTL_LPE;
3579fb38d839Srin 	else
3580fb38d839Srin 		rctl &= ~IGC_RCTL_LPE;
3581d0d8f2a5Srin 
3582d0d8f2a5Srin 	/* Strip the CRC */
3583d0d8f2a5Srin 	rctl |= IGC_RCTL_SECRC;
3584d0d8f2a5Srin 
3585d0d8f2a5Srin 	/*
3586d0d8f2a5Srin 	 * Set the interrupt throttling rate. Value is calculated
3587d0d8f2a5Srin 	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
3588fb38d839Srin 	 *
3589fb38d839Srin 	 * XXX Sync with Linux, especially for jumbo MTU or TSO.
3590fb38d839Srin 	 * XXX Shouldn't be here?
3591d0d8f2a5Srin 	 */
3592d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);
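	/*
	 * E.g. a MAX_INTS_PER_SEC of 8000 (a common e1000-family
	 * default) would give a 125 us interval, i.e. about 488 ticks
	 * of 256 ns.
	 */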
3593d0d8f2a5Srin 
3594d0d8f2a5Srin 	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
3595fb38d839Srin 	rxcsum &= ~(IGC_RXCSUM_IPOFL | IGC_RXCSUM_TUOFL | IGC_RXCSUM_PCSD);
3596fb38d839Srin 	if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx)
3597fb38d839Srin 		rxcsum |= IGC_RXCSUM_IPOFL;
3598fb38d839Srin 	if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx |
3599fb38d839Srin 				 IFCAP_CSUM_TCPv6_Rx | IFCAP_CSUM_UDPv6_Rx))
3600fb38d839Srin 		rxcsum |= IGC_RXCSUM_TUOFL;
3601d0d8f2a5Srin 	if (sc->sc_nqueues > 1)
3602d0d8f2a5Srin 		rxcsum |= IGC_RXCSUM_PCSD;
3603d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
3604d0d8f2a5Srin 
3605d0d8f2a5Srin 	if (sc->sc_nqueues > 1)
3606d0d8f2a5Srin 		igc_initialize_rss_mapping(sc);
3607d0d8f2a5Srin 
3608fb38d839Srin 	srrctl = 0;
3609d0d8f2a5Srin #if 0
3610d0d8f2a5Srin 	srrctl |= 4096 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
3611d0d8f2a5Srin 	rctl |= IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX;
3612fb38d839Srin #else
3613d0d8f2a5Srin 	srrctl |= 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
3614d0d8f2a5Srin 	rctl |= IGC_RCTL_SZ_2048;
3615fb38d839Srin #endif
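	/*
	 * BSIZEPKT is in 1 KB granules (assuming the usual shift of 10),
	 * so 2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT programs 2 KB buffers to
	 * match the MCLBYTES clusters posted by igc_get_buf().
	 */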
3616d0d8f2a5Srin 
3617d0d8f2a5Srin 	/*
3618d0d8f2a5Srin 	 * If TX flow control is disabled and there's > 1 queue defined,
3619d0d8f2a5Srin 	 * enable DROP.
3620d0d8f2a5Srin 	 *
3621d0d8f2a5Srin 	 * This drops frames rather than hanging the RX MAC for all queues.
3622d0d8f2a5Srin 	 */
3623fb38d839Srin 	if (sc->sc_nqueues > 1 &&
3624fb38d839Srin 	    (sc->fc == igc_fc_none || sc->fc == igc_fc_rx_pause))
3625d0d8f2a5Srin 		srrctl |= IGC_SRRCTL_DROP_EN;
3626d0d8f2a5Srin 
3627d0d8f2a5Srin 	/* Setup the Base and Length of the RX descriptor rings. */
3628fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3629fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
3630fb38d839Srin 		const uint64_t bus_addr =
3631fb38d839Srin 		    rxr->rxdma.dma_map->dm_segs[0].ds_addr;
3632fb38d839Srin 
3633fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), 0);
3634d0d8f2a5Srin 
3635d0d8f2a5Srin 		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
3636d0d8f2a5Srin 
3637fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RDLEN(iq),
3638d0d8f2a5Srin 		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
3639fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RDBAH(iq), (uint32_t)(bus_addr >> 32));
3640fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RDBAL(iq), (uint32_t)bus_addr);
3641fb38d839Srin 		IGC_WRITE_REG(hw, IGC_SRRCTL(iq), srrctl);
3642d0d8f2a5Srin 
3643d0d8f2a5Srin 		/* Setup the Head and Tail Descriptor Pointers */
3644fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RDH(iq), 0);
3645fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RDT(iq), 0 /* XXX rxr->last_desc_filled */);
3646d0d8f2a5Srin 
3647d0d8f2a5Srin 		/* Enable this Queue */
3648fb38d839Srin 		uint32_t rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(iq));
3649d0d8f2a5Srin 		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
3650d0d8f2a5Srin 		rxdctl &= 0xFFF00000;
3651d0d8f2a5Srin 		rxdctl |= IGC_RX_PTHRESH;
3652d0d8f2a5Srin 		rxdctl |= IGC_RX_HTHRESH << 8;
3653d0d8f2a5Srin 		rxdctl |= IGC_RX_WTHRESH << 16;
3654fb38d839Srin 		IGC_WRITE_REG(hw, IGC_RXDCTL(iq), rxdctl);
3655d0d8f2a5Srin 	}
3656d0d8f2a5Srin 
3657d0d8f2a5Srin 	/* Make sure VLAN Filters are off */
3658d0d8f2a5Srin 	rctl &= ~IGC_RCTL_VFE;
3659d0d8f2a5Srin 
3660d0d8f2a5Srin 	/* Write out the settings */
3661d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
3662d0d8f2a5Srin }
3663d0d8f2a5Srin 
3664d0d8f2a5Srin /*********************************************************************
3665d0d8f2a5Srin  *
3666d0d8f2a5Srin  *  Free all receive rings.
3667d0d8f2a5Srin  *
3668d0d8f2a5Srin  **********************************************************************/
3669fb38d839Srin static void
3670d0d8f2a5Srin igc_free_receive_structures(struct igc_softc *sc)
3671d0d8f2a5Srin {
3672d0d8f2a5Srin 
3673fb38d839Srin 	for (int iq = 0; iq < sc->sc_nqueues; iq++) {
3674fb38d839Srin 		struct rx_ring *rxr = &sc->rx_rings[iq];
3675d0d8f2a5Srin 
3676d0d8f2a5Srin 		igc_free_receive_buffers(rxr);
3677d0d8f2a5Srin 	}
3678fb38d839Srin }
3679d0d8f2a5Srin 
3680d0d8f2a5Srin /*********************************************************************
3681d0d8f2a5Srin  *
3682d0d8f2a5Srin  *  Free receive ring data structures
3683d0d8f2a5Srin  *
3684d0d8f2a5Srin  **********************************************************************/
3685fb38d839Srin static void
3686d0d8f2a5Srin igc_free_receive_buffers(struct rx_ring *rxr)
3687d0d8f2a5Srin {
3688d0d8f2a5Srin 	struct igc_softc *sc = rxr->sc;
3689d0d8f2a5Srin 
3690d0d8f2a5Srin 	if (rxr->rx_buffers != NULL) {
3691fb38d839Srin 		for (int id = 0; id < sc->num_rx_desc; id++) {
3692fb38d839Srin 			struct igc_rx_buf *rxbuf = &rxr->rx_buffers[id];
3693fb38d839Srin 			bus_dmamap_t map = rxbuf->map;
3694fb38d839Srin 
3695d0d8f2a5Srin 			if (rxbuf->buf != NULL) {
3696fb38d839Srin 				bus_dmamap_sync(rxr->rxdma.dma_tag, map,
3697fb38d839Srin 				    0, map->dm_mapsize, BUS_DMASYNC_POSTREAD);
3698fb38d839Srin 				bus_dmamap_unload(rxr->rxdma.dma_tag, map);
3699d0d8f2a5Srin 				m_freem(rxbuf->buf);
3700d0d8f2a5Srin 				rxbuf->buf = NULL;
3701d0d8f2a5Srin 			}
3702fb38d839Srin 			bus_dmamap_destroy(rxr->rxdma.dma_tag, map);
3703d0d8f2a5Srin 			rxbuf->map = NULL;
3704d0d8f2a5Srin 		}
3705fb38d839Srin 		kmem_free(rxr->rx_buffers,
3706d0d8f2a5Srin 		    sc->num_rx_desc * sizeof(struct igc_rx_buf));
3707d0d8f2a5Srin 		rxr->rx_buffers = NULL;
3708d0d8f2a5Srin 	}
3709fb38d839Srin 
3710fb38d839Srin 	mutex_destroy(&rxr->rxr_lock);
3711fb38d839Srin }
3712fb38d839Srin 
3713fb38d839Srin /*********************************************************************
3714fb38d839Srin  *
3715fb38d839Srin  * Clear status registers in all RX descriptors.
3716fb38d839Srin  *
3717fb38d839Srin  **********************************************************************/
3718fb38d839Srin static void
3719fb38d839Srin igc_clear_receive_status(struct rx_ring *rxr)
3720fb38d839Srin {
3721fb38d839Srin 	struct igc_softc *sc = rxr->sc;
3722fb38d839Srin 
3723fb38d839Srin 	mutex_enter(&rxr->rxr_lock);
3724fb38d839Srin 
3725fb38d839Srin 	for (int id = 0; id < sc->num_rx_desc; id++) {
3726fb38d839Srin 		union igc_adv_rx_desc *rxdesc = &rxr->rx_base[id];
3727fb38d839Srin 
3728fb38d839Srin 		igc_rxdesc_sync(rxr, id,
3729fb38d839Srin 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
3730fb38d839Srin 		rxdesc->wb.upper.status_error = 0;
3731fb38d839Srin 		igc_rxdesc_sync(rxr, id,
3732fb38d839Srin 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3733fb38d839Srin 	}
3734fb38d839Srin 
3735fb38d839Srin 	mutex_exit(&rxr->rxr_lock);
3736d0d8f2a5Srin }
3737d0d8f2a5Srin 
3738d0d8f2a5Srin /*
3739d0d8f2a5Srin  * Initialise the RSS mapping for NICs that support multiple transmit/
3740d0d8f2a5Srin  * receive rings.
3741d0d8f2a5Srin  */
3742fb38d839Srin static void
3743d0d8f2a5Srin igc_initialize_rss_mapping(struct igc_softc *sc)
3744d0d8f2a5Srin {
3745d0d8f2a5Srin 	struct igc_hw *hw = &sc->hw;
3746d0d8f2a5Srin 
3747d0d8f2a5Srin 	/*
3748d0d8f2a5Srin 	 * The redirection table controls which destination
3749d0d8f2a5Srin 	 * queue each bucket redirects traffic to.
3750d0d8f2a5Srin 	 * Each DWORD represents four queues, with the LSB
3751d0d8f2a5Srin 	 * being the first queue in the DWORD.
3752d0d8f2a5Srin 	 *
3753d0d8f2a5Srin 	 * This just allocates buckets to queues using round-robin
3754d0d8f2a5Srin 	 * allocation.
3755d0d8f2a5Srin 	 *
3756d0d8f2a5Srin 	 * NOTE: It Just Happens to line up with the default
3757d0d8f2a5Srin 	 * RSS allocation method.
3758d0d8f2a5Srin 	 */
3759d0d8f2a5Srin 
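	/*
	 * E.g. with two queues the buckets alternate 0,1,0,1,... and
	 * every RETA dword becomes 0x01000100: byte n of dword d maps
	 * bucket 4*d + n.
	 */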
3760d0d8f2a5Srin 	/* Warning FM follows */
3761fb38d839Srin 	uint32_t reta = 0;
3762fb38d839Srin 	for (int i = 0; i < 128; i++) {
3763fb38d839Srin 		const int shift = 0; /* XXXRO */
3764fb38d839Srin 		int queue_id = i % sc->sc_nqueues;
3765d0d8f2a5Srin 		/* Adjust if required */
3766fb38d839Srin 		queue_id <<= shift;
3767d0d8f2a5Srin 
3768d0d8f2a5Srin 		/*
3769d0d8f2a5Srin 		 * The low 8 bits are for hash value (n+0);
3770d0d8f2a5Srin 		 * The next 8 bits are for hash value (n+1), etc.
3771d0d8f2a5Srin 		 */
3772fb38d839Srin 		reta >>= 8;
3773fb38d839Srin 		reta |= ((uint32_t)queue_id) << 24;
3774d0d8f2a5Srin 		if ((i & 3) == 3) {
3775d0d8f2a5Srin 			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
3776d0d8f2a5Srin 			reta = 0;
3777d0d8f2a5Srin 		}
3778d0d8f2a5Srin 	}
3779d0d8f2a5Srin 
3780d0d8f2a5Srin 	/*
3781d0d8f2a5Srin 	 * MRQC: Multiple Receive Queues Command
3782d0d8f2a5Srin 	 * Set queuing to RSS control, number depends on the device.
3783d0d8f2a5Srin 	 */
3784d0d8f2a5Srin 
3785d0d8f2a5Srin 	/* Set up random bits */
3786fb38d839Srin 	uint32_t rss_key[RSS_KEYSIZE / sizeof(uint32_t)];
3787fb38d839Srin 	rss_getkey((uint8_t *)rss_key);
3788d0d8f2a5Srin 
3789d0d8f2a5Srin 	/* Now fill our hash function seeds */
3790fb38d839Srin 	for (int i = 0; i < __arraycount(rss_key); i++)
3791d0d8f2a5Srin 		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);
3792d0d8f2a5Srin 
3793d0d8f2a5Srin 	/*
3794d0d8f2a5Srin 	 * Configure the RSS fields to hash upon.
3795d0d8f2a5Srin 	 */
3796fb38d839Srin 	uint32_t mrqc = IGC_MRQC_ENABLE_RSS_4Q;
3797fb38d839Srin 	mrqc |= IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP;
3798fb38d839Srin 	mrqc |= IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP;
3799d0d8f2a5Srin 	mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
3800d0d8f2a5Srin 
3801d0d8f2a5Srin 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
3802d0d8f2a5Srin }
3803d0d8f2a5Srin 
3804d0d8f2a5Srin /*
3805d0d8f2a5Srin  * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
3806d0d8f2a5Srin  * For ASF and Pass Through versions of f/w this means
3807d0d8f2a5Srin  * that the driver is loaded. For AMT versions of the f/w
3808d0d8f2a5Srin  * this means that the network i/f is open.
3809d0d8f2a5Srin  */
3810fb38d839Srin static void
3811d0d8f2a5Srin igc_get_hw_control(struct igc_softc *sc)
3812d0d8f2a5Srin {
3813fb38d839Srin 	const uint32_t ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
3814d0d8f2a5Srin 
3815d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
3816d0d8f2a5Srin }
3817d0d8f2a5Srin 
3818d0d8f2a5Srin /*
3819d0d8f2a5Srin  * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
3820d0d8f2a5Srin  * For ASF and Pass Through versions of f/w this means that
3821d0d8f2a5Srin  * the driver is no longer loaded. For AMT versions of the
3822d0d8f2a5Srin  * f/w this means that the network i/f is closed.
3823d0d8f2a5Srin  */
3824fb38d839Srin static void
3825d0d8f2a5Srin igc_release_hw_control(struct igc_softc *sc)
3826d0d8f2a5Srin {
3827fb38d839Srin 	const uint32_t ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
3828d0d8f2a5Srin 
3829d0d8f2a5Srin 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
3830d0d8f2a5Srin }
3831d0d8f2a5Srin 
3832fb38d839Srin static int
3833d0d8f2a5Srin igc_is_valid_ether_addr(uint8_t *addr)
3834d0d8f2a5Srin {
3835fb38d839Srin 	const char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
3836d0d8f2a5Srin 
3837fb38d839Srin 	if ((addr[0] & 1) || !bcmp(addr, zero_addr, ETHER_ADDR_LEN))
3838d0d8f2a5Srin 		return 0;
3839d0d8f2a5Srin 
3840d0d8f2a5Srin 	return 1;
3841d0d8f2a5Srin }
3842fb38d839Srin 
3843fb38d839Srin static void
3844fb38d839Srin igc_print_devinfo(struct igc_softc *sc)
3845fb38d839Srin {
3846fb38d839Srin 	device_t dev = sc->sc_dev;
3847fb38d839Srin 	struct igc_hw *hw = &sc->hw;
3848fb38d839Srin 	struct igc_phy_info *phy = &hw->phy;
3849fb38d839Srin 	u_int oui, model, rev;
38501be67e82Smsaitoh 	uint16_t id1, id2, nvm_ver, phy_ver, etk_lo, etk_hi;
3851fb38d839Srin 	char descr[MII_MAX_DESCR_LEN];
3852fb38d839Srin 
3853fb38d839Srin 	/* Print PHY Info */
3854fb38d839Srin 	id1 = phy->id >> 16;
3855fb38d839Srin 	/* The revision field in phy->id is cleared and it's in phy->revision */
3856fb38d839Srin 	id2 = (phy->id & 0xfff0) | phy->revision;
3857fb38d839Srin 	oui = MII_OUI(id1, id2);
3858fb38d839Srin 	model = MII_MODEL(id2);
3859fb38d839Srin 	rev = MII_REV(id2);
3860fb38d839Srin 	mii_get_descr(descr, sizeof(descr), oui, model);
3861fb38d839Srin 	if (descr[0])
386206e5fbf4Smsaitoh 		aprint_normal_dev(dev, "PHY: %s, rev. %d",
3863fb38d839Srin 		    descr, rev);
3864fb38d839Srin 	else
3865fb38d839Srin 		aprint_normal_dev(dev,
386606e5fbf4Smsaitoh 		    "PHY OUI 0x%06x, model 0x%04x, rev. %d",
3867fb38d839Srin 		    oui, model, rev);
3868fb38d839Srin 
386906e5fbf4Smsaitoh 	/* PHY FW version */
387006e5fbf4Smsaitoh 	phy->ops.read_reg(hw, 0x1e, &phy_ver);
387106e5fbf4Smsaitoh 	aprint_normal(", PHY FW version 0x%04hx\n", phy_ver);
387206e5fbf4Smsaitoh 
38731be67e82Smsaitoh 	/* NVM version */
3874fb38d839Srin 	hw->nvm.ops.read(hw, NVM_VERSION, 1, &nvm_ver);
3875fb38d839Srin 
38761be67e82Smsaitoh 	/* EtrackID */
38771be67e82Smsaitoh 	hw->nvm.ops.read(hw, NVM_ETKID_LO, 1, &etk_lo);
38781be67e82Smsaitoh 	hw->nvm.ops.read(hw, NVM_ETKID_HI, 1, &etk_hi);
38791be67e82Smsaitoh 
38801be67e82Smsaitoh 	aprint_normal_dev(dev,
38811be67e82Smsaitoh 	    "NVM image version %x.%02x, EtrackID %04hx%04hx\n",
3882fb38d839Srin 	    (nvm_ver & NVM_VERSION_MAJOR) >> NVM_VERSION_MAJOR_SHIFT,
38831be67e82Smsaitoh 	    nvm_ver & NVM_VERSION_MINOR, etk_hi, etk_lo);
3884fb38d839Srin }
3885