1 /*	$OpenBSD: if_igc.c,v 1.21 2024/05/04 13:35:26 mbuhl Exp $	*/
2 /*-
3  * SPDX-License-Identifier: BSD-2-Clause
4  *
5  * Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
6  * All rights reserved.
7  * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include "bpfilter.h"
32 #include "vlan.h"
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/sockio.h>
37 #include <sys/mbuf.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 #include <sys/socket.h>
41 #include <sys/device.h>
42 #include <sys/endian.h>
43 #include <sys/intrmap.h>
44 
45 #include <net/if.h>
46 #include <net/if_media.h>
47 #include <net/route.h>
48 #include <net/toeplitz.h>
49 
50 #include <netinet/in.h>
51 #include <netinet/if_ether.h>
52 #include <netinet/tcp.h>
53 #include <netinet/tcp_timer.h>
54 #include <netinet/tcp_var.h>
55 
56 #if NBPFILTER > 0
57 #include <net/bpf.h>
58 #endif
59 
60 #include <machine/bus.h>
61 #include <machine/intr.h>
62 
63 #include <dev/pci/pcivar.h>
64 #include <dev/pci/pcireg.h>
65 #include <dev/pci/pcidevs.h>
66 #include <dev/pci/if_igc.h>
67 #include <dev/pci/igc_hw.h>
68 
69 const struct pci_matchid igc_devices[] = {
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I220_V },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I221_V },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_BLANK_NVM },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_I },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_IT },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_K2 },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LM },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_LMVP },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I225_V },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_BLANK_NVM },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_IT },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_LM },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_K },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_I226_V }
85 };
86 
87 /*********************************************************************
88  *  Function Prototypes
89  *********************************************************************/
90 int	igc_match(struct device *, void *, void *);
91 void	igc_attach(struct device *, struct device *, void *);
92 int	igc_detach(struct device *, int);
93 
94 void	igc_identify_hardware(struct igc_softc *);
95 int	igc_allocate_pci_resources(struct igc_softc *);
96 int	igc_allocate_queues(struct igc_softc *);
97 void	igc_free_pci_resources(struct igc_softc *);
98 void	igc_reset(struct igc_softc *);
99 void	igc_init_dmac(struct igc_softc *, uint32_t);
100 int	igc_allocate_msix(struct igc_softc *);
101 void	igc_setup_msix(struct igc_softc *);
102 int	igc_dma_malloc(struct igc_softc *, bus_size_t, struct igc_dma_alloc *);
103 void	igc_dma_free(struct igc_softc *, struct igc_dma_alloc *);
104 void	igc_setup_interface(struct igc_softc *);
105 
106 void	igc_init(void *);
107 void	igc_start(struct ifqueue *);
108 int	igc_txeof(struct tx_ring *);
109 void	igc_stop(struct igc_softc *);
110 int	igc_ioctl(struct ifnet *, u_long, caddr_t);
111 int	igc_rxrinfo(struct igc_softc *, struct if_rxrinfo *);
112 int	igc_rxfill(struct rx_ring *);
113 void	igc_rxrefill(void *);
114 int	igc_rxeof(struct rx_ring *);
115 void	igc_rx_checksum(uint32_t, struct mbuf *, uint32_t);
116 void	igc_watchdog(struct ifnet *);
117 void	igc_media_status(struct ifnet *, struct ifmediareq *);
118 int	igc_media_change(struct ifnet *);
119 void	igc_iff(struct igc_softc *);
120 void	igc_update_link_status(struct igc_softc *);
121 int	igc_get_buf(struct rx_ring *, int);
122 int	igc_tx_ctx_setup(struct tx_ring *, struct mbuf *, int, uint32_t *,
123 	    uint32_t *);
124 
125 void	igc_configure_queues(struct igc_softc *);
126 void	igc_set_queues(struct igc_softc *, uint32_t, uint32_t, int);
127 void	igc_enable_queue(struct igc_softc *, uint32_t);
128 void	igc_enable_intr(struct igc_softc *);
129 void	igc_disable_intr(struct igc_softc *);
130 int	igc_intr_link(void *);
131 int	igc_intr_queue(void *);
132 
133 int	igc_allocate_transmit_buffers(struct tx_ring *);
134 int	igc_setup_transmit_structures(struct igc_softc *);
135 int	igc_setup_transmit_ring(struct tx_ring *);
136 void	igc_initialize_transmit_unit(struct igc_softc *);
137 void	igc_free_transmit_structures(struct igc_softc *);
138 void	igc_free_transmit_buffers(struct tx_ring *);
139 int	igc_allocate_receive_buffers(struct rx_ring *);
140 int	igc_setup_receive_structures(struct igc_softc *);
141 int	igc_setup_receive_ring(struct rx_ring *);
142 void	igc_initialize_receive_unit(struct igc_softc *);
143 void	igc_free_receive_structures(struct igc_softc *);
144 void	igc_free_receive_buffers(struct rx_ring *);
145 void	igc_initialize_rss_mapping(struct igc_softc *);
146 
147 void	igc_get_hw_control(struct igc_softc *);
148 void	igc_release_hw_control(struct igc_softc *);
149 int	igc_is_valid_ether_addr(uint8_t *);
150 
151 /*********************************************************************
152  *  OpenBSD Device Interface Entry Points
153  *********************************************************************/
154 
155 struct cfdriver igc_cd = {
156 	NULL, "igc", DV_IFNET
157 };
158 
159 const struct cfattach igc_ca = {
160 	sizeof(struct igc_softc), igc_match, igc_attach, igc_detach
161 };
162 
163 /*********************************************************************
164  *  Device identification routine
165  *
166  *  igc_match determines if the driver should be loaded on the
167  *  adapter based on the PCI vendor/device ID of the adapter.
168  *
169  *  return nonzero if the adapter matches, 0 otherwise
170  *********************************************************************/
171 int
172 igc_match(struct device *parent, void *match, void *aux)
173 {
174 	return pci_matchbyid((struct pci_attach_args *)aux, igc_devices,
175 	    nitems(igc_devices));
176 }
177 
178 /*********************************************************************
179  *  Device initialization routine
180  *
181  *  The attach entry point is called when the driver is being loaded.
182  *  This routine identifies the type of hardware, allocates all
183  *  resources, and initializes the hardware.
186  *********************************************************************/
187 void
188 igc_attach(struct device *parent, struct device *self, void *aux)
189 {
190 	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
191 	struct igc_softc *sc = (struct igc_softc *)self;
192 	struct igc_hw *hw = &sc->hw;
193 
194 	sc->osdep.os_sc = sc;
195 	sc->osdep.os_pa = *pa;
196 
197 	/* Determine hardware and mac info */
198 	igc_identify_hardware(sc);
199 
200 	sc->num_tx_desc = IGC_DEFAULT_TXD;
201 	sc->num_rx_desc = IGC_DEFAULT_RXD;
202 
203 	/* Setup PCI resources */
204 	if (igc_allocate_pci_resources(sc))
205 		goto err_pci;
206 
207 	/* Allocate TX/RX queues */
208 	if (igc_allocate_queues(sc))
209 		goto err_pci;
210 
211 	/* Do shared code initialization */
212 	if (igc_setup_init_funcs(hw, true)) {
213 		printf(": Setup of shared code failed\n");
214 		goto err_pci;
215 	}
216 
217 	hw->mac.autoneg = DO_AUTO_NEG;
218 	hw->phy.autoneg_wait_to_complete = false;
219 	hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
220 
221 	/* Copper options. */
222 	if (hw->phy.media_type == igc_media_type_copper)
223 		hw->phy.mdix = AUTO_ALL_MODES;
224 
225 	/* Set the max frame size. */
226 	sc->hw.mac.max_frame_size = 9234;
227 
228 	/* Allocate multicast array memory. */
229 	sc->mta = mallocarray(ETHER_ADDR_LEN, MAX_NUM_MULTICAST_ADDRESSES,
230 	    M_DEVBUF, M_NOWAIT);
231 	if (sc->mta == NULL) {
232 		printf(": Can not allocate multicast setup array\n");
233 		goto err_late;
234 	}
235 
236 	/* Check SOL/IDER usage. */
237 	if (igc_check_reset_block(hw))
238 		printf(": PHY reset is blocked due to SOL/IDER session\n");
239 
240 	/* Disable Energy Efficient Ethernet. */
241 	sc->hw.dev_spec._i225.eee_disable = true;
242 
243 	igc_reset_hw(hw);
244 
245 	/* Make sure we have a good EEPROM before we read from it. */
246 	if (igc_validate_nvm_checksum(hw) < 0) {
247 		/*
248 		 * Some PCI-E parts fail the first check due to
249 		 * the link being in a sleep state. Call it again;
250 		 * if it fails a second time, it's a real issue.
251 		 */
252 		if (igc_validate_nvm_checksum(hw) < 0) {
253 			printf(": The EEPROM checksum is not valid\n");
254 			goto err_late;
255 		}
256 	}
257 
258 	/* Copy the permanent MAC address out of the EEPROM. */
259 	if (igc_read_mac_addr(hw) < 0) {
260 		printf(": EEPROM read error while reading MAC address\n");
261 		goto err_late;
262 	}
263 
264 	if (!igc_is_valid_ether_addr(hw->mac.addr)) {
265 		printf(": Invalid MAC address\n");
266 		goto err_late;
267 	}
268 
269 	memcpy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);
270 
271 	if (igc_allocate_msix(sc))
272 		goto err_late;
273 
274 	/* Setup OS specific network interface. */
275 	igc_setup_interface(sc);
276 
277 	igc_reset(sc);
278 	hw->mac.get_link_status = true;
279 	igc_update_link_status(sc);
280 
281 	/* The driver can now take control from firmware. */
282 	igc_get_hw_control(sc);
283 
284 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
285 	return;
286 
287 err_late:
288 	igc_release_hw_control(sc);
289 err_pci:
290 	igc_free_pci_resources(sc);
291 	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
292 }
293 
294 /*********************************************************************
295  *  Device removal routine
296  *
297  *  The detach entry point is called when the driver is being removed.
298  *  This routine stops the adapter and deallocates all the resources
299  *  that were allocated for driver operation.
300  *
301  *  return 0 on success, positive on failure
302  *********************************************************************/
303 int
304 igc_detach(struct device *self, int flags)
305 {
306 	struct igc_softc *sc = (struct igc_softc *)self;
307 	struct ifnet *ifp = &sc->sc_ac.ac_if;
308 
309 	igc_stop(sc);
310 
311 	igc_phy_hw_reset(&sc->hw);
312 	igc_release_hw_control(sc);
313 
314 	ether_ifdetach(ifp);
315 	if_detach(ifp);
316 
317 	igc_free_pci_resources(sc);
318 
319 	igc_free_transmit_structures(sc);
320 	igc_free_receive_structures(sc);
321 	free(sc->mta, M_DEVBUF, ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
322 
323 	return 0;
324 }
325 
326 void
327 igc_identify_hardware(struct igc_softc *sc)
328 {
329 	struct igc_osdep *os = &sc->osdep;
330 	struct pci_attach_args *pa = &os->os_pa;
331 
332 	/* Save off the information about this board. */
333 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
334 
335 	/* Do shared code init and setup. */
336 	if (igc_set_mac_type(&sc->hw)) {
337 		printf(": Setup init failure\n");
338 		return;
339 	}
340 }
341 
342 int
343 igc_allocate_pci_resources(struct igc_softc *sc)
344 {
345 	struct igc_osdep *os = &sc->osdep;
346 	struct pci_attach_args *pa = &os->os_pa;
347 	pcireg_t memtype;
348 
349 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IGC_PCIREG);
350 	if (pci_mapreg_map(pa, IGC_PCIREG, memtype, 0, &os->os_memt,
351 	    &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
352 		printf(": unable to map registers\n");
353 		return ENXIO;
354 	}
355 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
356 	sc->hw.back = os;
357 
358 	igc_setup_msix(sc);
359 
360 	return 0;
361 }
362 
363 int
364 igc_allocate_queues(struct igc_softc *sc)
365 {
366 	struct igc_queue *iq;
367 	struct tx_ring *txr;
368 	struct rx_ring *rxr;
369 	int i, rsize, rxconf, tsize, txconf;
370 
371 	/* Allocate the top level queue structs. */
372 	sc->queues = mallocarray(sc->sc_nqueues, sizeof(struct igc_queue),
373 	    M_DEVBUF, M_NOWAIT | M_ZERO);
374 	if (sc->queues == NULL) {
375 		printf("%s: unable to allocate queue\n", DEVNAME(sc));
376 		goto fail;
377 	}
378 
379 	/* Allocate the TX ring. */
380 	sc->tx_rings = mallocarray(sc->sc_nqueues, sizeof(struct tx_ring),
381 	    M_DEVBUF, M_NOWAIT | M_ZERO);
382 	if (sc->tx_rings == NULL) {
383 		printf("%s: unable to allocate TX ring\n", DEVNAME(sc));
384 		goto fail;
385 	}
386 
387 	/* Allocate the RX ring. */
388 	sc->rx_rings = mallocarray(sc->sc_nqueues, sizeof(struct rx_ring),
389 	    M_DEVBUF, M_NOWAIT | M_ZERO);
390 	if (sc->rx_rings == NULL) {
391 		printf("%s: unable to allocate RX ring\n", DEVNAME(sc));
392 		goto rx_fail;
393 	}
394 
395 	txconf = rxconf = 0;
396 
397 	/* Set up the TX queues. */
398 	tsize = roundup2(sc->num_tx_desc * sizeof(union igc_adv_tx_desc),
399 	    IGC_DBA_ALIGN);
400 	for (i = 0; i < sc->sc_nqueues; i++, txconf++) {
401 		txr = &sc->tx_rings[i];
402 		txr->sc = sc;
403 		txr->me = i;
404 
405 		if (igc_dma_malloc(sc, tsize, &txr->txdma)) {
406 			printf("%s: unable to allocate TX descriptor\n",
407 			    DEVNAME(sc));
408 			goto err_tx_desc;
409 		}
410 		txr->tx_base = (union igc_adv_tx_desc *)txr->txdma.dma_vaddr;
411 		bzero((void *)txr->tx_base, tsize);
412 	}
413 
414 	/* Set up the RX queues. */
415 	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
416 	    IGC_DBA_ALIGN);
417 	for (i = 0; i < sc->sc_nqueues; i++, rxconf++) {
418 		rxr = &sc->rx_rings[i];
419 		rxr->sc = sc;
420 		rxr->me = i;
421 		timeout_set(&rxr->rx_refill, igc_rxrefill, rxr);
422 
423 		if (igc_dma_malloc(sc, rsize, &rxr->rxdma)) {
424 			printf("%s: unable to allocate RX descriptor\n",
425 			    DEVNAME(sc));
426 			goto err_rx_desc;
427 		}
428 		rxr->rx_base = (union igc_adv_rx_desc *)rxr->rxdma.dma_vaddr;
429 		bzero((void *)rxr->rx_base, rsize);
430 	}
431 
432 	/* Set up the queue holding structs. */
433 	for (i = 0; i < sc->sc_nqueues; i++) {
434 		iq = &sc->queues[i];
435 		iq->sc = sc;
436 		iq->txr = &sc->tx_rings[i];
437 		iq->rxr = &sc->rx_rings[i];
438 		snprintf(iq->name, sizeof(iq->name), "%s:%d", DEVNAME(sc), i);
439 	}
440 
441 	return 0;
442 
443 err_rx_desc:
444 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
445 		igc_dma_free(sc, &rxr->rxdma);
446 err_tx_desc:
447 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
448 		igc_dma_free(sc, &txr->txdma);
449 	free(sc->rx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct rx_ring));
450 	sc->rx_rings = NULL;
451 rx_fail:
452 	free(sc->tx_rings, M_DEVBUF, sc->sc_nqueues * sizeof(struct tx_ring));
453 	sc->tx_rings = NULL;
454 fail:
455 	return ENOMEM;
456 }
457 
458 void
459 igc_free_pci_resources(struct igc_softc *sc)
460 {
461 	struct igc_osdep *os = &sc->osdep;
462 	struct pci_attach_args *pa = &os->os_pa;
463 	struct igc_queue *iq = sc->queues;
464 	int i;
465 
466 	/* Release all msix queue resources. */
467 	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
468 		if (iq->tag)
469 			pci_intr_disestablish(pa->pa_pc, iq->tag);
470 		iq->tag = NULL;
471 	}
472 
473 	if (sc->tag)
474 		pci_intr_disestablish(pa->pa_pc, sc->tag);
475 	sc->tag = NULL;
476 	if (os->os_membase != 0)
477 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
478 	os->os_membase = 0;
479 }
480 
481 /*********************************************************************
482  *
483  *  Initialize the hardware to a configuration as specified by the
484  *  adapter structure.
485  *
486  **********************************************************************/
487 void
488 igc_reset(struct igc_softc *sc)
489 {
490 	struct igc_hw *hw = &sc->hw;
491 	uint32_t pba;
492 	uint16_t rx_buffer_size;
493 
494 	/* Let the firmware know the OS is in control */
495 	igc_get_hw_control(sc);
496 
497 	/*
498 	 * Packet Buffer Allocation (PBA)
499 	 * Writing PBA sets the receive portion of the buffer;
500 	 * the remainder is used for the transmit buffer.
501 	 */
502 	pba = IGC_PBA_34K;
503 
504 	/*
505 	 * These parameters control the automatic generation (Tx) and
506 	 * response (Rx) to Ethernet PAUSE frames.
507 	 * - High water mark should allow for at least two frames to be
508 	 *   received after sending an XOFF.
509 	 * - Low water mark works best when it is very near the high water mark.
510 	 *   This allows the receiver to restart by sending XON when it has
511 	 *   drained a bit. Here the low water mark is set 16 bytes below the
512 	 *   high water mark, so XON is sent almost as soon as the buffer
513 	 *   starts to drain.
516 	 * - The pause time is fairly large at 1000 x 512ns = 512 usec.
517 	 */
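	/*
	 * The low 16 bits of PBA hold the RX packet buffer size in KB;
	 * shifting left by 10 converts it to bytes.
	 */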
518 	rx_buffer_size = (pba & 0xffff) << 10;
519 	hw->fc.high_water = rx_buffer_size -
520 	    roundup2(sc->hw.mac.max_frame_size, 1024);
521 	/* 16-byte granularity */
522 	hw->fc.low_water = hw->fc.high_water - 16;
523 
524 	if (sc->fc) /* locally set flow control value? */
525 		hw->fc.requested_mode = sc->fc;
526 	else
527 		hw->fc.requested_mode = igc_fc_full;
528 
529 	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
530 
531 	hw->fc.send_xon = true;
532 
533 	/* Issue a global reset */
534 	igc_reset_hw(hw);
535 	IGC_WRITE_REG(hw, IGC_WUC, 0);
536 
537 	/* and a re-init */
538 	if (igc_init_hw(hw) < 0) {
539 		printf(": Hardware Initialization Failed\n");
540 		return;
541 	}
542 
543 	/* Setup DMA Coalescing */
544 	igc_init_dmac(sc, pba);
545 
546 	IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
547 	igc_get_phy_info(hw);
548 	igc_check_for_link(hw);
549 }
550 
551 /*********************************************************************
552  *
553  *  Initialize the DMA Coalescing feature
554  *
555  **********************************************************************/
556 void
557 igc_init_dmac(struct igc_softc *sc, uint32_t pba)
558 {
559 	struct igc_hw *hw = &sc->hw;
560 	uint32_t dmac, reg = ~IGC_DMACR_DMAC_EN;
561 	uint16_t hwm, max_frame_size;
562 	int status;
563 
564 	max_frame_size = sc->hw.mac.max_frame_size;
565 
566 	if (sc->dmac == 0) { /* Disabling it */
567 		IGC_WRITE_REG(hw, IGC_DMACR, reg);
568 		return;
569 	} else
570 		printf(": DMA Coalescing enabled\n");
571 
572 	/* Set starting threshold */
573 	IGC_WRITE_REG(hw, IGC_DMCTXTH, 0);
574 
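	/*
	 * pba is in KB and the FCRTC threshold is in 16-byte units, so
	 * 64 * pba converts KB into 16-byte units; one maximum-sized
	 * frame (also in 16-byte units) is kept as headroom.
	 */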
575 	hwm = 64 * pba - max_frame_size / 16;
576 	if (hwm < 64 * (pba - 6))
577 		hwm = 64 * (pba - 6);
578 	reg = IGC_READ_REG(hw, IGC_FCRTC);
579 	reg &= ~IGC_FCRTC_RTH_COAL_MASK;
580 	reg |= ((hwm << IGC_FCRTC_RTH_COAL_SHIFT)
581 		& IGC_FCRTC_RTH_COAL_MASK);
582 	IGC_WRITE_REG(hw, IGC_FCRTC, reg);
583 
584 	dmac = pba - max_frame_size / 512;
585 	if (dmac < pba - 10)
586 		dmac = pba - 10;
587 	reg = IGC_READ_REG(hw, IGC_DMACR);
588 	reg &= ~IGC_DMACR_DMACTHR_MASK;
589 	reg |= ((dmac << IGC_DMACR_DMACTHR_SHIFT)
590 		& IGC_DMACR_DMACTHR_MASK);
591 
592 	/* Transition to the L0s or L1 link power states if available. */
593 	reg |= (IGC_DMACR_DMAC_EN | IGC_DMACR_DMAC_LX_MASK);
594 
595 	/*
596 	 * Check whether the link is a 2.5Gb backplane connection
597 	 * before configuring the watchdog timer: its count is in
598 	 * 12.8 usec intervals on 2.5Gb links and in 32 usec
599 	 * intervals otherwise.
600 	 */
601 	status = IGC_READ_REG(hw, IGC_STATUS);
602 	if ((status & IGC_STATUS_2P5_SKU) &&
603 	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
604 		reg |= ((sc->dmac * 5) >> 6);
605 	else
606 		reg |= (sc->dmac >> 5);
607 
608 	IGC_WRITE_REG(hw, IGC_DMACR, reg);
609 
610 	IGC_WRITE_REG(hw, IGC_DMCRTRH, 0);
611 
612 	/* Set the interval before transition */
613 	reg = IGC_READ_REG(hw, IGC_DMCTLX);
614 	reg |= IGC_DMCTLX_DCFLUSH_DIS;
615 
616 	/*
617 	 * On a 2.5Gb connection the TTLX unit is 0.4 usec, so the
618 	 * same 4 usec delay becomes 4 / 0.4 = 10 = 0xA.
619 	 */
620 	status = IGC_READ_REG(hw, IGC_STATUS);
621 	if ((status & IGC_STATUS_2P5_SKU) &&
622 	    (!(status & IGC_STATUS_2P5_SKU_OVER)))
623 		reg |= 0xA;
624 	else
625 		reg |= 0x4;
626 
627 	IGC_WRITE_REG(hw, IGC_DMCTLX, reg);
628 
629 	/* free space in tx packet buffer to wake from DMA coal */
630 	IGC_WRITE_REG(hw, IGC_DMCTXTH, (IGC_TXPBSIZE -
631 	    (2 * max_frame_size)) >> 6);
632 
633 	/* make low power state decision controlled by DMA coal */
634 	reg = IGC_READ_REG(hw, IGC_PCIEMISC);
635 	reg &= ~IGC_PCIEMISC_LX_DECISION;
636 	IGC_WRITE_REG(hw, IGC_PCIEMISC, reg);
637 }
638 
639 int
640 igc_allocate_msix(struct igc_softc *sc)
641 {
642 	struct igc_osdep *os = &sc->osdep;
643 	struct pci_attach_args *pa = &os->os_pa;
644 	struct igc_queue *iq;
645 	pci_intr_handle_t ih;
646 	int i, error = 0;
647 
648 	for (i = 0, iq = sc->queues; i < sc->sc_nqueues; i++, iq++) {
649 		if (pci_intr_map_msix(pa, i, &ih)) {
650 			printf("%s: unable to map msi-x vector %d\n",
651 			    DEVNAME(sc), i);
652 			error = ENOMEM;
653 			goto fail;
654 		}
655 
656 		iq->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
657 		    IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
658 		    igc_intr_queue, iq, iq->name);
659 		if (iq->tag == NULL) {
660 			printf("%s: unable to establish interrupt %d\n",
661 			    DEVNAME(sc), i);
662 			error = ENOMEM;
663 			goto fail;
664 		}
665 
666 		iq->msix = i;
667 		iq->eims = 1 << i;
668 	}
669 
670 	/* Now the link status/control last MSI-X vector. */
671 	if (pci_intr_map_msix(pa, i, &ih)) {
672 		printf("%s: unable to map link vector\n", DEVNAME(sc));
673 		error = ENOMEM;
674 		goto fail;
675 	}
676 
677 	sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
678 	    igc_intr_link, sc, sc->sc_dev.dv_xname);
679 	if (sc->tag == NULL) {
680 		printf("%s: unable to establish link interrupt\n", DEVNAME(sc));
681 		error = ENOMEM;
682 		goto fail;
683 	}
684 
685 	sc->linkvec = i;
686 	printf(", %s, %d queue%s", pci_intr_string(pa->pa_pc, ih),
687 	    i, (i > 1) ? "s" : "");
688 
689 	return 0;
690 fail:
691 	for (iq = sc->queues; i > 0; i--, iq++) {
692 		if (iq->tag == NULL)
693 			continue;
694 		pci_intr_disestablish(pa->pa_pc, iq->tag);
695 		iq->tag = NULL;
696 	}
697 
698 	return error;
699 }
700 
701 void
702 igc_setup_msix(struct igc_softc *sc)
703 {
704 	struct igc_osdep *os = &sc->osdep;
705 	struct pci_attach_args *pa = &os->os_pa;
706 	int nmsix;
707 
708 	nmsix = pci_intr_msix_count(pa);
709 	if (nmsix <= 1)
710 		printf(": not enough msi-x vectors\n");
711 
712 	/* Give one vector to events. */
713 	nmsix--;
714 
715 	sc->sc_intrmap = intrmap_create(&sc->sc_dev, nmsix, IGC_MAX_VECTORS,
716 	    INTRMAP_POWEROF2);
717 	sc->sc_nqueues = intrmap_count(sc->sc_intrmap);
718 }
719 
720 int
721 igc_dma_malloc(struct igc_softc *sc, bus_size_t size, struct igc_dma_alloc *dma)
722 {
723 	struct igc_osdep *os = &sc->osdep;
724 
725 	dma->dma_tag = os->os_pa.pa_dmat;
726 
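	/*
	 * Standard four-step bus_dma setup: create the map, allocate
	 * DMA-safe memory, map it into kernel VA, then load the map.
	 * Each failure path unwinds the steps completed so far.
	 */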
727 	if (bus_dmamap_create(dma->dma_tag, size, 1, size, 0, BUS_DMA_NOWAIT,
728 	    &dma->dma_map))
729 		return 1;
730 	if (bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
731 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT))
732 		goto destroy;
733 	if (bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
734 	    &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))
735 		goto free;
736 	if (bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size,
737 	    NULL, BUS_DMA_NOWAIT))
738 		goto unmap;
739 
740 	dma->dma_size = size;
741 
742 	return 0;
743 unmap:
744 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
745 free:
746 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
747 destroy:
748 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
749 	dma->dma_map = NULL;
750 	dma->dma_tag = NULL;
751 	return 1;
752 }
753 
754 void
755 igc_dma_free(struct igc_softc *sc, struct igc_dma_alloc *dma)
756 {
757 	if (dma->dma_tag == NULL)
758 		return;
759 
760 	if (dma->dma_map != NULL) {
761 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
762 		    dma->dma_map->dm_mapsize,
763 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
764 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
765 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
766 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
767 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
768 		dma->dma_map = NULL;
769 	}
770 }
771 
772 /*********************************************************************
773  *
774  *  Setup networking device structure and register an interface.
775  *
776  **********************************************************************/
777 void
778 igc_setup_interface(struct igc_softc *sc)
779 {
780 	struct ifnet *ifp = &sc->sc_ac.ac_if;
781 	int i;
782 
783 	ifp->if_softc = sc;
784 	strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
785 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
786 	ifp->if_xflags = IFXF_MPSAFE;
787 	ifp->if_ioctl = igc_ioctl;
788 	ifp->if_qstart = igc_start;
789 	ifp->if_watchdog = igc_watchdog;
790 	ifp->if_hardmtu = sc->hw.mac.max_frame_size - ETHER_HDR_LEN -
791 	    ETHER_CRC_LEN;
792 	ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
793 
794 	ifp->if_capabilities = IFCAP_VLAN_MTU;
795 
796 #if NVLAN > 0
797 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
798 #endif
799 
800 	ifp->if_capabilities |= IFCAP_CSUM_IPv4;
801 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
802 	ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
803 	ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
804 
805 	/* Initialize ifmedia structures. */
806 	ifmedia_init(&sc->media, IFM_IMASK, igc_media_change, igc_media_status);
807 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
808 	ifmedia_add(&sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
809 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
810 	ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
811 	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
812 	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
813 	ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
814 
815 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
816 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
817 
818 	if_attach(ifp);
819 	ether_ifattach(ifp);
820 
821 	if_attach_queues(ifp, sc->sc_nqueues);
822 	if_attach_iqueues(ifp, sc->sc_nqueues);
823 	for (i = 0; i < sc->sc_nqueues; i++) {
824 		struct ifqueue *ifq = ifp->if_ifqs[i];
825 		struct ifiqueue *ifiq = ifp->if_iqs[i];
826 		struct tx_ring *txr = &sc->tx_rings[i];
827 		struct rx_ring *rxr = &sc->rx_rings[i];
828 
829 		ifq->ifq_softc = txr;
830 		txr->ifq = ifq;
831 
832 		ifiq->ifiq_softc = rxr;
833 		rxr->ifiq = ifiq;
834 	}
835 }
836 
837 void
838 igc_init(void *arg)
839 {
840 	struct igc_softc *sc = (struct igc_softc *)arg;
841 	struct ifnet *ifp = &sc->sc_ac.ac_if;
842 	struct rx_ring *rxr;
843 	uint32_t ctrl = 0;
844 	int i, s;
845 
846 	s = splnet();
847 
848 	igc_stop(sc);
849 
850 	/* Get the latest mac address, user can use a LAA. */
851 	bcopy(sc->sc_ac.ac_enaddr, sc->hw.mac.addr, ETHER_ADDR_LEN);
852 
853 	/* Put the address into the receive address array. */
854 	igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);
855 
856 	/* Initialize the hardware. */
857 	igc_reset(sc);
858 	igc_update_link_status(sc);
859 
860 	/* Setup VLAN support, basic and offload if available. */
861 	IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);
862 
863 	/* Prepare transmit descriptors and buffers. */
864 	if (igc_setup_transmit_structures(sc)) {
865 		printf("%s: Could not setup transmit structures\n",
866 		    DEVNAME(sc));
867 		igc_stop(sc);
868 		splx(s);
869 		return;
870 	}
871 	igc_initialize_transmit_unit(sc);
872 
873 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
874 	/* Prepare receive descriptors and buffers. */
875 	if (igc_setup_receive_structures(sc)) {
876 		printf("%s: Could not setup receive structures\n",
877 		    DEVNAME(sc));
878 		igc_stop(sc);
879 		splx(s);
880 		return;
881 	}
882 	igc_initialize_receive_unit(sc);
883 
884 	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) {
885 		ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
886 		ctrl |= IGC_CTRL_VME;
887 		IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
888 	}
889 
890 	/* Setup multicast table. */
891 	igc_iff(sc);
892 
893 	igc_clear_hw_cntrs_base_generic(&sc->hw);
894 
895 	igc_configure_queues(sc);
896 
897 	/* This clears any pending interrupts */
898 	IGC_READ_REG(&sc->hw, IGC_ICR);
899 	IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);
900 
901 	/* The driver can now take control from firmware. */
902 	igc_get_hw_control(sc);
903 
904 	/* Set Energy Efficient Ethernet. */
905 	igc_set_eee_i225(&sc->hw, true, true, true);
906 
907 	for (i = 0; i < sc->sc_nqueues; i++) {
908 		rxr = &sc->rx_rings[i];
909 		igc_rxfill(rxr);
910 		if (if_rxr_inuse(&rxr->rx_ring) == 0) {
911 			printf("%s: Unable to fill any rx descriptors\n",
912 			    DEVNAME(sc));
913 			igc_stop(sc);
914 			splx(s);
			return;
915 		}
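		/*
		 * RDT is the tail pointer and must point one slot past
		 * the last filled descriptor, hence the +1 modulo the
		 * ring size.
		 */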
916 		IGC_WRITE_REG(&sc->hw, IGC_RDT(i),
917 		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
918 	}
919 
920 	igc_enable_intr(sc);
921 
922 	ifp->if_flags |= IFF_RUNNING;
923 	for (i = 0; i < sc->sc_nqueues; i++)
924 		ifq_clr_oactive(ifp->if_ifqs[i]);
925 
926 	splx(s);
927 }
928 
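/*
 * Load an mbuf chain into a DMA map. If the chain needs more segments
 * than the map allows (EFBIG), defragment it into a single mbuf and
 * retry the load once.
 */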
929 static inline int
930 igc_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
931 {
932 	int error;
933 
934 	error = bus_dmamap_load_mbuf(dmat, map, m,
935 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
936 	if (error != EFBIG)
937 		return (error);
938 
939 	error = m_defrag(m, M_DONTWAIT);
940 	if (error != 0)
941 		return (error);
942 
943 	return (bus_dmamap_load_mbuf(dmat, map, m,
944 	    BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
945 }
946 
947 void
948 igc_start(struct ifqueue *ifq)
949 {
950 	struct ifnet *ifp = ifq->ifq_if;
951 	struct igc_softc *sc = ifp->if_softc;
952 	struct tx_ring *txr = ifq->ifq_softc;
953 	union igc_adv_tx_desc *txdesc;
954 	struct igc_tx_buf *txbuf;
955 	bus_dmamap_t map;
956 	struct mbuf *m;
957 	unsigned int prod, free, last, i;
958 	unsigned int mask;
959 	uint32_t cmd_type_len;
960 	uint32_t olinfo_status;
961 	int post = 0;
962 #if NBPFILTER > 0
963 	caddr_t if_bpf;
964 #endif
965 
966 	if (!sc->link_active) {
967 		ifq_purge(ifq);
968 		return;
969 	}
970 
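	/*
	 * Compute the number of free descriptors in the ring: the slots
	 * from the producer index up to, but not including, the
	 * consumer (next-to-clean) index, modulo the ring size.
	 */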
971 	prod = txr->next_avail_desc;
972 	free = txr->next_to_clean;
973 	if (free <= prod)
974 		free += sc->num_tx_desc;
975 	free -= prod;
976 
977 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
978 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
979 
980 	mask = sc->num_tx_desc - 1;
981 
982 	for (;;) {
983 		if (free <= IGC_MAX_SCATTER + 1) {
984 			ifq_set_oactive(ifq);
985 			break;
986 		}
987 
988 		m = ifq_dequeue(ifq);
989 		if (m == NULL)
990 			break;
991 
992 		txbuf = &txr->tx_buffers[prod];
993 		map = txbuf->map;
994 
995 		if (igc_load_mbuf(txr->txdma.dma_tag, map, m) != 0) {
996 			ifq->ifq_errors++;
997 			m_freem(m);
998 			continue;
999 		}
1000 
1001 		olinfo_status = m->m_pkthdr.len << IGC_ADVTXD_PAYLEN_SHIFT;
1002 
1003 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0,
1004 		    map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1005 
1006 		cmd_type_len = IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DTYP_DATA |
1007 		    IGC_ADVTXD_DCMD_DEXT;
1008 
1009 		if (igc_tx_ctx_setup(txr, m, prod, &cmd_type_len,
1010 		    &olinfo_status)) {
1011 			/* Consume the first descriptor */
1012 			prod++;
1013 			prod &= mask;
1014 			free--;
1015 		}
1016 
1017 		for (i = 0; i < map->dm_nsegs; i++) {
1018 			txdesc = &txr->tx_base[prod];
1019 
1020 			CLR(cmd_type_len, IGC_ADVTXD_DTALEN_MASK);
1021 			cmd_type_len |= map->dm_segs[i].ds_len;
1022 			if (i == map->dm_nsegs - 1)
1023 				cmd_type_len |= IGC_ADVTXD_DCMD_EOP |
1024 				    IGC_ADVTXD_DCMD_RS;
1025 
1026 			htolem64(&txdesc->read.buffer_addr,
1027 			    map->dm_segs[i].ds_addr);
1028 			htolem32(&txdesc->read.cmd_type_len, cmd_type_len);
1029 			htolem32(&txdesc->read.olinfo_status, olinfo_status);
1030 
1031 			last = prod;
1032 
1033 			prod++;
1034 			prod &= mask;
1035 		}
1036 
1037 		txbuf->m_head = m;
1038 		txbuf->eop_index = last;
1039 
1040 #if NBPFILTER > 0
1041 		if_bpf = ifp->if_bpf;
1042 		if (if_bpf)
1043 			bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
1044 #endif
1045 
1046 		free -= i;
1047 		post = 1;
1048 	}
1049 
1050 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1051 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1052 
1053 	if (post) {
1054 		txr->next_avail_desc = prod;
1055 		IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), prod);
1056 	}
1057 }
1058 
1059 int
1060 igc_txeof(struct tx_ring *txr)
1061 {
1062 	struct igc_softc *sc = txr->sc;
1063 	struct ifqueue *ifq = txr->ifq;
1064 	union igc_adv_tx_desc *txdesc;
1065 	struct igc_tx_buf *txbuf;
1066 	bus_dmamap_t map;
1067 	unsigned int cons, prod, last;
1068 	unsigned int mask;
1069 	int done = 0;
1070 
1071 	prod = txr->next_avail_desc;
1072 	cons = txr->next_to_clean;
1073 
1074 	if (cons == prod)
1075 		return (0);
1076 
1077 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1078 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1079 
1080 	mask = sc->num_tx_desc - 1;
1081 
1082 	do {
1083 		txbuf = &txr->tx_buffers[cons];
1084 		last = txbuf->eop_index;
1085 		txdesc = &txr->tx_base[last];
1086 
1087 		if (!(txdesc->wb.status & htole32(IGC_TXD_STAT_DD)))
1088 			break;
1089 
1090 		map = txbuf->map;
1091 
1092 		bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1093 		    BUS_DMASYNC_POSTWRITE);
1094 		bus_dmamap_unload(txr->txdma.dma_tag, map);
1095 		m_freem(txbuf->m_head);
1096 
1097 		txbuf->m_head = NULL;
1098 		txbuf->eop_index = -1;
1099 
1100 		cons = last + 1;
1101 		cons &= mask;
1102 
1103 		done = 1;
1104 	} while (cons != prod);
1105 
1106 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1107 	    txr->txdma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);
1108 
1109 	txr->next_to_clean = cons;
1110 
1111 	if (ifq_is_oactive(ifq))
1112 		ifq_restart(ifq);
1113 
1114 	return (done);
1115 }
1116 
1117 /*********************************************************************
1118  *
1119  *  This routine disables all traffic on the adapter by issuing a
1120  *  global reset on the MAC.
1121  *
1122  **********************************************************************/
1123 void
1124 igc_stop(struct igc_softc *sc)
1125 {
1126 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1127 	int i;
1128 
1129 	/* Tell the stack that the interface is no longer active. */
1130 	ifp->if_flags &= ~IFF_RUNNING;
1131 
1132 	igc_disable_intr(sc);
1133 
1134 	igc_reset_hw(&sc->hw);
1135 	IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
1136 
1137 	intr_barrier(sc->tag);
1138 	for (i = 0; i < sc->sc_nqueues; i++) {
1139 		struct ifqueue *ifq = ifp->if_ifqs[i];
1140 		ifq_barrier(ifq);
1141 		ifq_clr_oactive(ifq);
1142 
1143 		if (sc->queues[i].tag != NULL)
1144 			intr_barrier(sc->queues[i].tag);
1145 		timeout_del(&sc->rx_rings[i].rx_refill);
1146 	}
1147 
1148 	igc_free_transmit_structures(sc);
1149 	igc_free_receive_structures(sc);
1150 
1151 	igc_update_link_status(sc);
1152 }
1153 
1154 /*********************************************************************
1155  *  Ioctl entry point
1156  *
1157  *  igc_ioctl is called when the user wants to configure the
1158  *  interface.
1159  *
1160  *  return 0 on success, positive on failure
1161  **********************************************************************/
1162 int
1163 igc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
1164 {
1165 	struct igc_softc *sc = ifp->if_softc;
1166 	struct ifreq *ifr = (struct ifreq *)data;
1167 	int s, error = 0;
1168 
1169 	s = splnet();
1170 
1171 	switch (cmd) {
1172 	case SIOCSIFADDR:
1173 		ifp->if_flags |= IFF_UP;
1174 		if (!(ifp->if_flags & IFF_RUNNING))
1175 			igc_init(sc);
1176 		break;
1177 	case SIOCSIFFLAGS:
1178 		if (ifp->if_flags & IFF_UP) {
1179 			if (ifp->if_flags & IFF_RUNNING)
1180 				error = ENETRESET;
1181 			else
1182 				igc_init(sc);
1183 		} else {
1184 			if (ifp->if_flags & IFF_RUNNING)
1185 				igc_stop(sc);
1186 		}
1187 		break;
1188 	case SIOCSIFMEDIA:
1189 	case SIOCGIFMEDIA:
1190 		error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
1191 		break;
1192 	case SIOCGIFRXR:
1193 		error = igc_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1194 		break;
1195 	default:
1196 		error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1197 	}
1198 
1199 	if (error == ENETRESET) {
1200 		if (ifp->if_flags & IFF_RUNNING) {
1201 			igc_disable_intr(sc);
1202 			igc_iff(sc);
1203 			igc_enable_intr(sc);
1204 		}
1205 		error = 0;
1206 	}
1207 
1208 	splx(s);
1209 	return error;
1210 }
1211 
1212 int
1213 igc_rxrinfo(struct igc_softc *sc, struct if_rxrinfo *ifri)
1214 {
1215 	struct if_rxring_info *ifr;
1216 	struct rx_ring *rxr;
1217 	int error, i, n = 0;
1218 
1219 	ifr = mallocarray(sc->sc_nqueues, sizeof(*ifr), M_DEVBUF,
1220 	    M_WAITOK | M_ZERO);
1221 
1222 	for (i = 0; i < sc->sc_nqueues; i++) {
1223 		rxr = &sc->rx_rings[i];
1224 		ifr[n].ifr_size = MCLBYTES;
1225 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "%d", i);
1226 		ifr[n].ifr_info = rxr->rx_ring;
1227 		n++;
1228 	}
1229 
1230 	error = if_rxr_info_ioctl(ifri, sc->sc_nqueues, ifr);
1231 	free(ifr, M_DEVBUF, sc->sc_nqueues * sizeof(*ifr));
1232 
1233 	return error;
1234 }
1235 
1236 int
1237 igc_rxfill(struct rx_ring *rxr)
1238 {
1239 	struct igc_softc *sc = rxr->sc;
1240 	int i, post = 0;
1241 	u_int slots;
1242 
1243 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
1244 	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1245 
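	/*
	 * Reserve up to num_rx_desc slots from the ring accounting;
	 * slots we fail to fill with mbufs are handed back below via
	 * if_rxr_put().
	 */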
1246 	i = rxr->last_desc_filled;
1247 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc); slots > 0;
1248 	    slots--) {
1249 		if (++i == sc->num_rx_desc)
1250 			i = 0;
1251 
1252 		if (igc_get_buf(rxr, i) != 0)
1253 			break;
1254 
1255 		rxr->last_desc_filled = i;
1256 		post = 1;
1257 	}
1258 
1259 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
1260 	    rxr->rxdma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1261 
1262 	if_rxr_put(&rxr->rx_ring, slots);
1263 
1264 	return post;
1265 }
1266 
1267 void
1268 igc_rxrefill(void *xrxr)
1269 {
1270 	struct rx_ring *rxr = xrxr;
1271 	struct igc_softc *sc = rxr->sc;
1272 
1273 	if (igc_rxfill(rxr)) {
1274 		IGC_WRITE_REG(&sc->hw, IGC_RDT(rxr->me),
1275 		    (rxr->last_desc_filled + 1) % sc->num_rx_desc);
1276 	} else if (if_rxr_inuse(&rxr->rx_ring) == 0)
1278 		timeout_add(&rxr->rx_refill, 1);
1279 }
1280 
1281 /*********************************************************************
1282  *
1283  *  This routine executes in interrupt context. It replenishes
1284  *  the mbufs in the descriptor ring and passes data that has been
1285  *  DMA'd into host memory up to the upper layer.
1286  *
1287  *********************************************************************/
1288 int
1289 igc_rxeof(struct rx_ring *rxr)
1290 {
1291 	struct igc_softc *sc = rxr->sc;
1292 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1293 	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
1294 	struct mbuf *mp, *m;
1295 	struct igc_rx_buf *rxbuf, *nxbuf;
1296 	union igc_adv_rx_desc *rxdesc;
1297 	uint32_t ptype, staterr = 0;
1298 	uint16_t len, vtag;
1299 	uint8_t eop = 0;
1300 	int i, nextp;
1301 
1302 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
1303 		return 0;
1304 
1305 	i = rxr->next_to_check;
1306 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
1307 		uint32_t hash;
1308 		uint16_t hashtype;
1309 
1310 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1311 		    i * sizeof(union igc_adv_rx_desc),
1312 		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_POSTREAD);
1313 
1314 		rxdesc = &rxr->rx_base[i];
1315 		staterr = letoh32(rxdesc->wb.upper.status_error);
1316 		if (!ISSET(staterr, IGC_RXD_STAT_DD)) {
1317 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1318 			    i * sizeof(union igc_adv_rx_desc),
1319 			    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);
1320 			break;
1321 		}
1322 
1323 		/* Zero out the receive descriptors status. */
1324 		rxdesc->wb.upper.status_error = 0;
1325 		rxbuf = &rxr->rx_buffers[i];
1326 
1327 		/* Pull the mbuf off the ring. */
1328 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
1329 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1330 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
1331 
1332 		mp = rxbuf->buf;
1333 		len = letoh16(rxdesc->wb.upper.length);
1334 		vtag = letoh16(rxdesc->wb.upper.vlan);
1335 		eop = ((staterr & IGC_RXD_STAT_EOP) == IGC_RXD_STAT_EOP);
1336 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
1337 		    IGC_PKTTYPE_MASK;
1338 		hash = letoh32(rxdesc->wb.lower.hi_dword.rss);
1339 		hashtype = le16toh(rxdesc->wb.lower.lo_dword.hs_rss.pkt_info) &
1340 		    IGC_RXDADV_RSSTYPE_MASK;
1341 
1342 		if (staterr & IGC_RXDEXT_STATERR_RXE) {
1343 			if (rxbuf->fmp) {
1344 				m_freem(rxbuf->fmp);
1345 				rxbuf->fmp = NULL;
1346 			}
1347 
1348 			m_freem(mp);
1349 			rxbuf->buf = NULL;
1350 			goto next_desc;
1351 		}
1352 
1353 		if (mp == NULL) {
1354 			panic("%s: igc_rxeof: NULL mbuf in slot %d "
1355 			    "(nrx %d, filled %d)", DEVNAME(sc), i,
1356 			    if_rxr_inuse(&rxr->rx_ring), rxr->last_desc_filled);
1357 		}
1358 
1359 		if (!eop) {
1360 			/*
1361 			 * Figure out the next descriptor of this frame.
1362 			 */
1363 			nextp = i + 1;
1364 			if (nextp == sc->num_rx_desc)
1365 				nextp = 0;
1366 			nxbuf = &rxr->rx_buffers[nextp];
1367 			/* prefetch(nxbuf); */
1368 		}
1369 
1370 		mp->m_len = len;
1371 
1372 		m = rxbuf->fmp;
1373 		rxbuf->buf = rxbuf->fmp = NULL;
1374 
1375 		if (m != NULL)
1376 			m->m_pkthdr.len += mp->m_len;
1377 		else {
1378 			m = mp;
1379 			m->m_pkthdr.len = mp->m_len;
1380 #if NVLAN > 0
1381 			if (staterr & IGC_RXD_STAT_VP) {
1382 				m->m_pkthdr.ether_vtag = vtag;
1383 				m->m_flags |= M_VLANTAG;
1384 			}
1385 #endif
1386 		}
1387 
1388 		/* Pass the head pointer on */
1389 		if (eop == 0) {
1390 			nxbuf->fmp = m;
1391 			m = NULL;
1392 			mp->m_next = nxbuf->buf;
1393 		} else {
1394 			igc_rx_checksum(staterr, m, ptype);
1395 
1396 			if (hashtype != IGC_RXDADV_RSSTYPE_NONE) {
1397 				m->m_pkthdr.ph_flowid = hash;
1398 				SET(m->m_pkthdr.csum_flags, M_FLOWID);
1399 			}
1400 
1401 			ml_enqueue(&ml, m);
1402 		}
1403 next_desc:
1404 		if_rxr_put(&rxr->rx_ring, 1);
1405 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
1406 		    i * sizeof(union igc_adv_rx_desc),
1407 		    sizeof(union igc_adv_rx_desc), BUS_DMASYNC_PREREAD);
1408 
1409 		/* Advance our pointers to the next descriptor. */
1410 		if (++i == sc->num_rx_desc)
1411 			i = 0;
1412 	}
1413 	rxr->next_to_check = i;
1414 
1415 	if (ifiq_input(rxr->ifiq, &ml))
1416 		if_rxr_livelocked(&rxr->rx_ring);
1417 
1418 	if (!(staterr & IGC_RXD_STAT_DD))
1419 		return 0;
1420 
1421 	return 1;
1422 }
1423 
1424 /*********************************************************************
1425  *
1426  *  Verify that the hardware indicated that the checksum is valid.
1427  *  Inform the stack about the checksum status so that the stack
1428  *  doesn't spend time verifying the checksum itself.
1429  *
1430  *********************************************************************/
1431 void
1432 igc_rx_checksum(uint32_t staterr, struct mbuf *m, uint32_t ptype)
1433 {
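	/*
	 * The status bits live in the low 16 bits of staterr and the
	 * error bits in bits 24-31, hence the two narrowing casts
	 * below.
	 */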
1434 	uint16_t status = (uint16_t)staterr;
1435 	uint8_t errors = (uint8_t)(staterr >> 24);
1436 
1437 	if (status & IGC_RXD_STAT_IPCS) {
1438 		if (!(errors & IGC_RXD_ERR_IPE)) {
1439 			/* IP Checksum Good */
1440 			m->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
1441 		} else
1442 			m->m_pkthdr.csum_flags = 0;
1443 	}
1444 
1445 	if (status & (IGC_RXD_STAT_TCPCS | IGC_RXD_STAT_UDPCS)) {
1446 		if (!(errors & IGC_RXD_ERR_TCPE))
1447 			m->m_pkthdr.csum_flags |=
1448 			    M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
1449 	}
1450 }
1451 
1452 void
1453 igc_watchdog(struct ifnet *ifp)
1454 {
1455 }
1456 
1457 /*********************************************************************
1458  *
1459  *  Media Ioctl callback
1460  *
1461  *  This routine is called whenever the user queries the status of
1462  *  the interface using ifconfig.
1463  *
1464  **********************************************************************/
1465 void
1466 igc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1467 {
1468 	struct igc_softc *sc = ifp->if_softc;
1469 
1470 	igc_update_link_status(sc);
1471 
1472 	ifmr->ifm_status = IFM_AVALID;
1473 	ifmr->ifm_active = IFM_ETHER;
1474 
1475 	if (!sc->link_active) {
1476 		ifmr->ifm_active |= IFM_NONE;
1477 		return;
1478 	}
1479 
1480 	ifmr->ifm_status |= IFM_ACTIVE;
1481 
1482 	switch (sc->link_speed) {
1483 	case 10:
1484 		ifmr->ifm_active |= IFM_10_T;
1485 		break;
1486 	case 100:
1487 		ifmr->ifm_active |= IFM_100_TX;
1488 		break;
1489 	case 1000:
1490 		ifmr->ifm_active |= IFM_1000_T;
1491 		break;
1492 	case 2500:
1493 		ifmr->ifm_active |= IFM_2500_T;
1494 		break;
1495 	}
1496 
1497 	if (sc->link_duplex == FULL_DUPLEX)
1498 		ifmr->ifm_active |= IFM_FDX;
1499 	else
1500 		ifmr->ifm_active |= IFM_HDX;
1501 
1502 	switch (sc->hw.fc.current_mode) {
1503 	case igc_fc_tx_pause:
1504 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1505 		break;
1506 	case igc_fc_rx_pause:
1507 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1508 		break;
1509 	case igc_fc_full:
1510 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1511 		    IFM_ETH_TXPAUSE;
1512 		break;
1513 	default:
1514 		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1515 		    IFM_ETH_TXPAUSE);
1516 		break;
1517 	}
1518 }
1519 
1520 /*********************************************************************
1521  *
1522  *  Media Ioctl callback
1523  *
1524  *  This routine is called when the user changes speed/duplex using
1525  *  the media/mediaopt options with ifconfig.
1526  *
1527  **********************************************************************/
1528 int
1529 igc_media_change(struct ifnet *ifp)
1530 {
1531 	struct igc_softc *sc = ifp->if_softc;
1532 	struct ifmedia *ifm = &sc->media;
1533 
1534 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1535 		return (EINVAL);
1536 
1537 	sc->hw.mac.autoneg = DO_AUTO_NEG;
1538 
1539 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1540 	case IFM_AUTO:
1541 		sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
1542 		break;
1543 	case IFM_2500_T:
1544 		sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
1545 		break;
1546 	case IFM_1000_T:
1547 		sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
1548 		break;
1549 	case IFM_100_TX:
1550 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1551 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
1552 		else
1553 			sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
1554 		break;
1555 	case IFM_10_T:
1556 		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
1557 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
1558 		else
1559 			sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
1560 		break;
1561 	default:
1562 		return EINVAL;
1563 	}
1564 
1565 	igc_init(sc);
1566 
1567 	return 0;
1568 }
1569 
1570 void
1571 igc_iff(struct igc_softc *sc)
1572 {
1573 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1574 	struct arpcom *ac = &sc->sc_ac;
1575 	struct ether_multi *enm;
1576 	struct ether_multistep step;
1577 	uint32_t reg_rctl = 0;
1578 	uint8_t *mta;
1579 	int mcnt = 0;
1580 
1581 	mta = sc->mta;
1582 	bzero(mta, sizeof(uint8_t) * ETHER_ADDR_LEN *
1583 	    MAX_NUM_MULTICAST_ADDRESSES);
1584 
1585 	reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
1586 	reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
1587 	ifp->if_flags &= ~IFF_ALLMULTI;
1588 
1589 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1590 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1591 		ifp->if_flags |= IFF_ALLMULTI;
1592 		reg_rctl |= IGC_RCTL_MPE;
1593 		if (ifp->if_flags & IFF_PROMISC)
1594 			reg_rctl |= IGC_RCTL_UPE;
1595 	} else {
1596 		ETHER_FIRST_MULTI(step, ac, enm);
1597 		while (enm != NULL) {
1598 			bcopy(enm->enm_addrlo,
1599 			    &mta[mcnt * ETHER_ADDR_LEN], ETHER_ADDR_LEN);
1600 			mcnt++;
1601 
1602 			ETHER_NEXT_MULTI(step, enm);
1603 		}
1604 
1605 		igc_update_mc_addr_list(&sc->hw, mta, mcnt);
1606 	}
1607 
1608 	IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
1609 }
1610 
1611 void
1612 igc_update_link_status(struct igc_softc *sc)
1613 {
1614 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1615 	struct igc_hw *hw = &sc->hw;
1616 	int link_state;
1617 
1618 	if (hw->mac.get_link_status == true)
1619 		igc_check_for_link(hw);
1620 
1621 	if (IGC_READ_REG(&sc->hw, IGC_STATUS) & IGC_STATUS_LU) {
1622 		if (sc->link_active == 0) {
1623 			igc_get_speed_and_duplex(hw, &sc->link_speed,
1624 			    &sc->link_duplex);
1625 			sc->link_active = 1;
1626 			ifp->if_baudrate = IF_Mbps(sc->link_speed);
1627 		}
1628 		link_state = (sc->link_duplex == FULL_DUPLEX) ?
1629 		    LINK_STATE_FULL_DUPLEX : LINK_STATE_HALF_DUPLEX;
1630 	} else {
1631 		if (sc->link_active == 1) {
1632 			ifp->if_baudrate = sc->link_speed = 0;
1633 			sc->link_duplex = 0;
1634 			sc->link_active = 0;
1635 		}
1636 		link_state = LINK_STATE_DOWN;
1637 	}
1638 	if (ifp->if_link_state != link_state) {
1639 		ifp->if_link_state = link_state;
1640 		if_link_state_change(ifp);
1641 	}
1642 }
1643 
1644 /*********************************************************************
1645  *
1646  *  Get a buffer from system mbuf buffer pool.
1647  *
1648  **********************************************************************/
1649 int
1650 igc_get_buf(struct rx_ring *rxr, int i)
1651 {
1652 	struct igc_softc *sc = rxr->sc;
1653 	struct igc_rx_buf *rxbuf;
1654 	struct mbuf *m;
1655 	union igc_adv_rx_desc *rxdesc;
1656 	int error;
1657 
1658 	rxbuf = &rxr->rx_buffers[i];
1659 	rxdesc = &rxr->rx_base[i];
1660 	if (rxbuf->buf) {
1661 		printf("%s: slot %d already has an mbuf\n", DEVNAME(sc), i);
1662 		return ENOBUFS;
1663 	}
1664 
1665 	m = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
1666 	if (!m)
1667 		return ENOBUFS;
1668 
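	/*
	 * The cluster may be larger than requested; advance m_data so
	 * that exactly rx_mbuf_sz bytes remain. Since rx_mbuf_sz
	 * includes ETHER_ALIGN, this keeps the IP header aligned.
	 */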
1669 	m->m_data += (m->m_ext.ext_size - sc->rx_mbuf_sz);
1670 	m->m_len = m->m_pkthdr.len = sc->rx_mbuf_sz;
1671 
1672 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map, m,
1673 	    BUS_DMA_NOWAIT);
1674 	if (error) {
1675 		m_freem(m);
1676 		return error;
1677 	}
1678 
1679 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
1680 	    rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
1681 	rxbuf->buf = m;
1682 
1683 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
1684 
1685 	return 0;
1686 }
1687 
1688 void
1689 igc_configure_queues(struct igc_softc *sc)
1690 {
1691 	struct igc_hw *hw = &sc->hw;
1692 	struct igc_queue *iq = sc->queues;
1693 	uint32_t ivar, newitr = 0;
1694 	int i;
1695 
1696 	/* First turn on RSS capability */
1697 	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE | IGC_GPIE_EIAME |
1698 	    IGC_GPIE_PBA | IGC_GPIE_NSICR);
1699 
1700 	/* Set the starting interrupt rate */
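	/*
	 * The EITR interval field starts at bit 2, so the value below
	 * is (4 * 1000000 usec) / MAX_INTS_PER_SEC with the reserved
	 * low bits masked off.
	 */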
1701 	newitr = (4000000 / MAX_INTS_PER_SEC) & 0x7FFC;
1702 
1703 	newitr |= IGC_EITR_CNT_IGNR;
1704 
1705 	/* Turn on MSI-X */
1706 	for (i = 0; i < sc->sc_nqueues; i++, iq++) {
1707 		/* RX entries */
1708 		igc_set_queues(sc, i, iq->msix, 0);
1709 		/* TX entries */
1710 		igc_set_queues(sc, i, iq->msix, 1);
1711 		sc->msix_queuesmask |= iq->eims;
1712 		IGC_WRITE_REG(hw, IGC_EITR(iq->msix), newitr);
1713 	}
1714 
1715 	/* And for the link interrupt */
1716 	ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
1717 	sc->msix_linkmask = 1 << sc->linkvec;
1718 	IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
1719 }
1720 
1721 void
1722 igc_set_queues(struct igc_softc *sc, uint32_t entry, uint32_t vector, int type)
1723 {
1724 	struct igc_hw *hw = &sc->hw;
1725 	uint32_t ivar, index;
1726 
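	/*
	 * Each 32-bit IVAR register maps two queues, one byte per
	 * entry: RX vectors in bytes 0 and 2, TX vectors in bytes 1
	 * and 3. entry >> 1 selects the register; entry & 1 together
	 * with the RX/TX type selects the byte.
	 */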
1727 	index = entry >> 1;
1728 	ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
1729 	if (type) {
1730 		if (entry & 1) {
1731 			ivar &= 0x00FFFFFF;
1732 			ivar |= (vector | IGC_IVAR_VALID) << 24;
1733 		} else {
1734 			ivar &= 0xFFFF00FF;
1735 			ivar |= (vector | IGC_IVAR_VALID) << 8;
1736 		}
1737 	} else {
1738 		if (entry & 1) {
1739 			ivar &= 0xFF00FFFF;
1740 			ivar |= (vector | IGC_IVAR_VALID) << 16;
1741 		} else {
1742 			ivar &= 0xFFFFFF00;
1743 			ivar |= vector | IGC_IVAR_VALID;
1744 		}
1745 	}
1746 	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
1747 }
1748 
1749 void
1750 igc_enable_queue(struct igc_softc *sc, uint32_t eims)
1751 {
1752 	IGC_WRITE_REG(&sc->hw, IGC_EIMS, eims);
1753 }
1754 
1755 void
1756 igc_enable_intr(struct igc_softc *sc)
1757 {
1758 	struct igc_hw *hw = &sc->hw;
1759 	uint32_t mask;
1760 
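	/*
	 * Enable the queue vectors in auto-clear (EIAC) and auto-mask
	 * (EIAM) mode, unmask them via EIMS, and enable the link
	 * status change cause via IMS.
	 */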
1761 	mask = (sc->msix_queuesmask | sc->msix_linkmask);
1762 	IGC_WRITE_REG(hw, IGC_EIAC, mask);
1763 	IGC_WRITE_REG(hw, IGC_EIAM, mask);
1764 	IGC_WRITE_REG(hw, IGC_EIMS, mask);
1765 	IGC_WRITE_REG(hw, IGC_IMS, IGC_IMS_LSC);
1766 	IGC_WRITE_FLUSH(hw);
1767 }
1768 
1769 void
1770 igc_disable_intr(struct igc_softc *sc)
1771 {
1772 	struct igc_hw *hw = &sc->hw;
1773 
1774 	IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
1775 	IGC_WRITE_REG(hw, IGC_EIAC, 0);
1776 	IGC_WRITE_REG(hw, IGC_IMC, 0xffffffff);
1777 	IGC_WRITE_FLUSH(hw);
1778 }
1779 
1780 int
1781 igc_intr_link(void *arg)
1782 {
1783 	struct igc_softc *sc = (struct igc_softc *)arg;
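	/* Reading ICR acknowledges and clears the asserted cause bits. */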
1784 	uint32_t reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);
1785 
1786 	if (reg_icr & IGC_ICR_LSC) {
1787 		KERNEL_LOCK();
1788 		sc->hw.mac.get_link_status = true;
1789 		igc_update_link_status(sc);
1790 		KERNEL_UNLOCK();
1791 	}
1792 
1793 	IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
1794 	IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->msix_linkmask);
1795 
1796 	return 1;
1797 }
1798 
1799 int
1800 igc_intr_queue(void *arg)
1801 {
1802 	struct igc_queue *iq = arg;
1803 	struct igc_softc *sc = iq->sc;
1804 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1805 	struct rx_ring *rxr = iq->rxr;
1806 	struct tx_ring *txr = iq->txr;
1807 
1808 	if (ifp->if_flags & IFF_RUNNING) {
1809 		igc_txeof(txr);
1810 		igc_rxeof(rxr);
1811 		igc_rxrefill(rxr);
1812 	}
1813 
1814 	igc_enable_queue(sc, iq->eims);
1815 
1816 	return 1;
1817 }
1818 
1819 /*********************************************************************
1820  *
1821  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1822  *  the information needed to transmit a packet on the wire.
1823  *
1824  **********************************************************************/
1825 int
1826 igc_allocate_transmit_buffers(struct tx_ring *txr)
1827 {
1828 	struct igc_softc *sc = txr->sc;
1829 	struct igc_tx_buf *txbuf;
1830 	int error, i;
1831 
1832 	txr->tx_buffers = mallocarray(sc->num_tx_desc,
1833 	    sizeof(struct igc_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
1834 	if (txr->tx_buffers == NULL) {
1835 		printf("%s: Unable to allocate tx_buffer memory\n",
1836 		    DEVNAME(sc));
1837 		error = ENOMEM;
1838 		goto fail;
1839 	}
1840 	txr->txtag = txr->txdma.dma_tag;
1841 
1842 	/* Create the descriptor buffer dma maps. */
1843 	for (i = 0; i < sc->num_tx_desc; i++) {
1844 		txbuf = &txr->tx_buffers[i];
1845 		error = bus_dmamap_create(txr->txdma.dma_tag, IGC_TSO_SIZE,
1846 		    IGC_MAX_SCATTER, PAGE_SIZE, 0, BUS_DMA_NOWAIT, &txbuf->map);
1847 		if (error != 0) {
1848 			printf("%s: Unable to create TX DMA map\n",
1849 			    DEVNAME(sc));
1850 			goto fail;
1851 		}
1852 	}
1853 
1854 	return 0;
1855 fail:
1856 	return error;
1857 }
1858 
1859 
1860 /*********************************************************************
1861  *
1862  *  Allocate and initialize transmit structures.
1863  *
1864  **********************************************************************/
1865 int
1866 igc_setup_transmit_structures(struct igc_softc *sc)
1867 {
1868 	struct tx_ring *txr = sc->tx_rings;
1869 	int i;
1870 
1871 	for (i = 0; i < sc->sc_nqueues; i++, txr++) {
1872 		if (igc_setup_transmit_ring(txr))
1873 			goto fail;
1874 	}
1875 
1876 	return 0;
1877 fail:
1878 	igc_free_transmit_structures(sc);
1879 	return ENOBUFS;
1880 }
1881 
1882 /*********************************************************************
1883  *
1884  *  Initialize a transmit ring.
1885  *
1886  **********************************************************************/
1887 int
1888 igc_setup_transmit_ring(struct tx_ring *txr)
1889 {
1890 	struct igc_softc *sc = txr->sc;
1891 
1892 	/* Now allocate transmit buffers for the ring. */
1893 	if (igc_allocate_transmit_buffers(txr))
1894 		return ENOMEM;
1895 
1896 	/* Clear the old ring contents */
1897 	bzero((void *)txr->tx_base,
1898 	    (sizeof(union igc_adv_tx_desc)) * sc->num_tx_desc);
1899 
1900 	/* Reset indices. */
1901 	txr->next_avail_desc = 0;
1902 	txr->next_to_clean = 0;
1903 
1904 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
1905 	    txr->txdma.dma_map->dm_mapsize,
1906 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1907 
1908 	return 0;
1909 }
1910 
1911 /*********************************************************************
1912  *
1913  *  Enable transmit unit.
1914  *
1915  **********************************************************************/
1916 void
1917 igc_initialize_transmit_unit(struct igc_softc *sc)
1918 {
1919 	struct ifnet *ifp = &sc->sc_ac.ac_if;
1920 	struct tx_ring *txr;
1921 	struct igc_hw *hw = &sc->hw;
1922 	uint64_t bus_addr;
1923 	uint32_t tctl, txdctl = 0;
1924 	int i;
1925 
1926 	/* Setup the Base and Length of the TX descriptor ring. */
1927 	for (i = 0; i < sc->sc_nqueues; i++) {
1928 		txr = &sc->tx_rings[i];
1929 
1930 		bus_addr = txr->txdma.dma_map->dm_segs[0].ds_addr;
1931 
1932 		/* Base and len of TX ring */
1933 		IGC_WRITE_REG(hw, IGC_TDLEN(i),
1934 		    sc->num_tx_desc * sizeof(union igc_adv_tx_desc));
1935 		IGC_WRITE_REG(hw, IGC_TDBAH(i), (uint32_t)(bus_addr >> 32));
1936 		IGC_WRITE_REG(hw, IGC_TDBAL(i), (uint32_t)bus_addr);
1937 
1938 		/* Init the HEAD/TAIL indices */
1939 		IGC_WRITE_REG(hw, IGC_TDT(i), 0);
1940 		IGC_WRITE_REG(hw, IGC_TDH(i), 0);
1941 
1942 		txr->watchdog_timer = 0;
1943 
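		/*
		 * The fields below program PTHRESH=31, HTHRESH=1,
		 * WTHRESH=1 and LWTHRESH=1; GRAN is understood to select
		 * descriptor (rather than cache-line) granularity for
		 * these thresholds.
		 */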
1944 		txdctl = 0;		/* Clear txdctl */
1945 		txdctl |= 0x1f;		/* PTHRESH */
1946 		txdctl |= 1 << 8;	/* HTHRESH */
1947 		txdctl |= 1 << 16;	/* WTHRESH */
1948 		txdctl |= 1 << 22;	/* Reserved bit 22 must always be 1 */
1949 		txdctl |= IGC_TXDCTL_GRAN;
1950 		txdctl |= 1 << 25;	/* LWTHRESH */
1951 
1952 		IGC_WRITE_REG(hw, IGC_TXDCTL(i), txdctl);
1953 	}
1954 	ifp->if_timer = 0;
1955 
1956 	/* Program the Transmit Control Register */
1957 	tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
1958 	tctl &= ~IGC_TCTL_CT;
1959 	tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
1960 	    (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
1961 
1962 	/* This write will effectively turn on the transmit unit. */
1963 	IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
1964 }
1965 
1966 /*********************************************************************
1967  *
1968  *  Free all transmit rings.
1969  *
1970  **********************************************************************/
1971 void
1972 igc_free_transmit_structures(struct igc_softc *sc)
1973 {
1974 	struct tx_ring *txr = sc->tx_rings;
1975 	int i;
1976 
1977 	for (i = 0; i < sc->sc_nqueues; i++, txr++)
1978 		igc_free_transmit_buffers(txr);
1979 }
1980 
1981 /*********************************************************************
1982  *
1983  *  Free transmit ring related data structures.
1984  *
1985  **********************************************************************/
1986 void
1987 igc_free_transmit_buffers(struct tx_ring *txr)
1988 {
1989 	struct igc_softc *sc = txr->sc;
1990 	struct igc_tx_buf *txbuf;
1991 	int i;
1992 
1993 	if (txr->tx_buffers == NULL)
1994 		return;
1995 
1996 	txbuf = txr->tx_buffers;
1997 	for (i = 0; i < sc->num_tx_desc; i++, txbuf++) {
1998 		if (txbuf->map != NULL && txbuf->map->dm_nsegs > 0) {
1999 			bus_dmamap_sync(txr->txdma.dma_tag, txbuf->map,
2000 			    0, txbuf->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2001 			bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
2002 		}
2003 		if (txbuf->m_head != NULL) {
2004 			m_freem(txbuf->m_head);
2005 			txbuf->m_head = NULL;
2006 		}
2007 		if (txbuf->map != NULL) {
2008 			bus_dmamap_destroy(txr->txdma.dma_tag, txbuf->map);
2009 			txbuf->map = NULL;
2010 		}
2011 	}
2012 
2013 	if (txr->tx_buffers != NULL)
2014 		free(txr->tx_buffers, M_DEVBUF,
2015 		    sc->num_tx_desc * sizeof(struct igc_tx_buf));
2016 	txr->tx_buffers = NULL;
2017 	txr->txtag = NULL;
2018 }
2019 
2020 
2021 /*********************************************************************
2022  *
2023  *  Advanced Context Descriptor setup for VLAN, CSUM or TSO
2024  *
2025  **********************************************************************/
2026 
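/*
 * Returns 1 when a context descriptor has been written at `prod'
 * (the caller must account for that extra slot), 0 when none was needed.
 */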
2027 int
2028 igc_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp, int prod,
2029     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2030 {
2031 	struct ether_extracted ext;
2032 	struct igc_adv_tx_context_desc *txdesc;
2033 	uint32_t mss_l4len_idx = 0;
2034 	uint32_t type_tucmd_mlhl = 0;
2035 	uint32_t vlan_macip_lens = 0;
2036 	int off = 0;
2037 
2038 	/*
2039 	 * In advanced descriptors the vlan tag must
2040 	 * be placed into the context descriptor. Hence
2041 	 * we need to make one even if not doing offloads.
2042 	 */
2043 #if NVLAN > 0
2044 	if (ISSET(mp->m_flags, M_VLANTAG)) {
2045 		uint32_t vtag = mp->m_pkthdr.ether_vtag;
2046 		vlan_macip_lens |= (vtag << IGC_ADVTXD_VLAN_SHIFT);
2047 		*cmd_type_len |= IGC_ADVTXD_DCMD_VLE;
2048 		off = 1;
2049 	}
2050 #endif
2051 
2052 	ether_extract_headers(mp, &ext);
2053 
2054 	vlan_macip_lens |= (sizeof(*ext.eh) << IGC_ADVTXD_MACLEN_SHIFT);
2055 
2056 	if (ext.ip4) {
2057 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV4;
2058 		if (ISSET(mp->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT)) {
2059 			*olinfo_status |= IGC_TXD_POPTS_IXSM << 8;
2060 			off = 1;
2061 		}
2062 #ifdef INET6
2063 	} else if (ext.ip6) {
2064 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_IPV6;
2065 #endif
2066 	}
2067 
2068 	vlan_macip_lens |= ext.iphlen;
2069 	type_tucmd_mlhl |= IGC_ADVTXD_DCMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
2070 
2071 	if (ext.tcp) {
2072 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
2073 		if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2074 			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
2075 			off = 1;
2076 		}
2077 	} else if (ext.udp) {
2078 		type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
2079 		if (ISSET(mp->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2080 			*olinfo_status |= IGC_TXD_POPTS_TXSM << 8;
2081 			off = 1;
2082 		}
2083 	}
2084 
2085 	if (ISSET(mp->m_pkthdr.csum_flags, M_TCP_TSO)) {
2086 		if (ext.tcp) {
2087 			uint32_t hdrlen, thlen, paylen, outlen;
2088 
2089 			thlen = ext.tcphlen;
2090 
2091 			outlen = mp->m_pkthdr.ph_mss;
2092 			mss_l4len_idx |= outlen << IGC_ADVTXD_MSS_SHIFT;
2093 			mss_l4len_idx |= thlen << IGC_ADVTXD_L4LEN_SHIFT;
2094 
2095 			hdrlen = sizeof(*ext.eh) + ext.iphlen + thlen;
2096 			paylen = mp->m_pkthdr.len - hdrlen;
2097 			CLR(*olinfo_status, IGC_ADVTXD_PAYLEN_MASK);
2098 			*olinfo_status |= paylen << IGC_ADVTXD_PAYLEN_SHIFT;
2099 
2100 			*cmd_type_len |= IGC_ADVTXD_DCMD_TSE;
2101 			off = 1;
2102 
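			/*
			 * One TSO request leaves the wire as
			 * ceil(paylen / mss) packets; count them all.
			 */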
2103 			tcpstat_add(tcps_outpkttso,
2104 			    (paylen + outlen - 1) / outlen);
2105 		} else
2106 			tcpstat_inc(tcps_outbadtso);
2107 	}
2108 
2109 	if (off == 0)
2110 		return 0;
2111 
2112 	/* Now ready a context descriptor */
2113 	txdesc = (struct igc_adv_tx_context_desc *)&txr->tx_base[prod];
2114 
2115 	/* Now copy bits into descriptor */
2116 	htolem32(&txdesc->vlan_macip_lens, vlan_macip_lens);
2117 	htolem32(&txdesc->type_tucmd_mlhl, type_tucmd_mlhl);
2118 	htolem32(&txdesc->seqnum_seed, 0);
2119 	htolem32(&txdesc->mss_l4len_idx, mss_l4len_idx);
2120 
2121 	return 1;
2122 }
2123 
2124 /*********************************************************************
2125  *
2126  *  Allocate memory for rx_buffer structures. Since we use one
2127  *  rx_buffer per received packet, the maximum number of rx_buffers
2128  *  that we'll need is equal to the number of receive descriptors
2129  *  that we've allocated.
2130  *
2131  **********************************************************************/
2132 int
2133 igc_allocate_receive_buffers(struct rx_ring *rxr)
2134 {
2135 	struct igc_softc *sc = rxr->sc;
2136 	struct igc_rx_buf *rxbuf;
2137 	int i, error;
2138 
2139 	rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2140 	    sizeof(struct igc_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO);
2141 	if (rxr->rx_buffers == NULL) {
2142 		printf("%s: Unable to allocate rx_buffer memory\n",
2143 		    DEVNAME(sc));
2144 		error = ENOMEM;
2145 		goto fail;
2146 	}
2147 
2148 	rxbuf = rxr->rx_buffers;
2149 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2150 		error = bus_dmamap_create(rxr->rxdma.dma_tag,
2151 		    MAX_JUMBO_FRAME_SIZE, 1, MAX_JUMBO_FRAME_SIZE, 0,
2152 		    BUS_DMA_NOWAIT, &rxbuf->map);
2153 		if (error) {
2154 			printf("%s: Unable to create RX DMA map\n",
2155 			    DEVNAME(sc));
2156 			goto fail;
2157 		}
2158 	}
2159 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2160 	    rxr->rxdma.dma_map->dm_mapsize,
2161 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2162 
2163 	return 0;
2164 fail:
2165 	return error;
2166 }
2167 
2168 /*********************************************************************
2169  *
2170  *  Allocate and initialize receive structures.
2171  *
2172  **********************************************************************/
2173 int
2174 igc_setup_receive_structures(struct igc_softc *sc)
2175 {
2176 	struct rx_ring *rxr = sc->rx_rings;
2177 	int i;
2178 
2179 	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
2180 		if (igc_setup_receive_ring(rxr))
2181 			goto fail;
2182 	}
2183 
2184 	return 0;
2185 fail:
2186 	igc_free_receive_structures(sc);
2187 	return ENOBUFS;
2188 }
2189 
2190 /*********************************************************************
2191  *
2192  *  Initialize a receive ring and its buffers.
2193  *
2194  **********************************************************************/
2195 int
2196 igc_setup_receive_ring(struct rx_ring *rxr)
2197 {
2198 	struct igc_softc *sc = rxr->sc;
2199 	struct ifnet *ifp = &sc->sc_ac.ac_if;
2200 	int rsize;
2201 
2202 	rsize = roundup2(sc->num_rx_desc * sizeof(union igc_adv_rx_desc),
2203 	    IGC_DBA_ALIGN);
2204 
2205 	/* Clear the ring contents. */
2206 	bzero((void *)rxr->rx_base, rsize);
2207 
2208 	if (igc_allocate_receive_buffers(rxr))
2209 		return ENOMEM;
2210 
2211 	/* Setup our descriptor indices. */
2212 	rxr->next_to_check = 0;
2213 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2214 
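	/*
	 * Low watermark: clusters for two maximum-sized packets, e.g.
	 * 2 * ((1500 / 2048) + 1) == 2 with the default MTU and 2 KB
	 * clusters (MCLBYTES assumed to be 2048 here).
	 */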
2215 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2216 	    sc->num_rx_desc - 1);
2217 
2218 	return 0;
2219 }
2220 
2221 /*********************************************************************
2222  *
2223  *  Enable receive unit.
2224  *
2225  **********************************************************************/
2226 #define BSIZEPKT_ROUNDUP	((1 << IGC_SRRCTL_BSIZEPKT_SHIFT) - 1)
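/*
 * Rounds the buffer size up to the next 1 KB unit before the shift:
 * with IGC_SRRCTL_BSIZEPKT_SHIFT == 10, a 2048-byte mbuf programs 2,
 * and a hypothetical 2500-byte buffer would program 3.
 */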
2227 
2228 void
2229 igc_initialize_receive_unit(struct igc_softc *sc)
2230 {
2231 	struct rx_ring *rxr = sc->rx_rings;
2232 	struct igc_hw *hw = &sc->hw;
2233 	uint32_t rctl, rxcsum, srrctl = 0;
2234 	int i;
2235 
2236 	/*
2237 	 * Make sure receives are disabled while setting
2238 	 * up the descriptor ring.
2239 	 */
2240 	rctl = IGC_READ_REG(hw, IGC_RCTL);
2241 	IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
2242 
2243 	/* Setup the Receive Control Register */
2244 	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
2245 	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
2246 	    IGC_RCTL_RDMTS_HALF | (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
2247 
2248 	/* Do not store bad packets */
2249 	rctl &= ~IGC_RCTL_SBP;
2250 
2251 	/* Enable Long Packet receive */
2252 	if (sc->hw.mac.max_frame_size != ETHER_MAX_LEN)
2253 		rctl |= IGC_RCTL_LPE;
2254 
2255 	/* Strip the CRC */
2256 	rctl |= IGC_RCTL_SECRC;
2257 
2258 	/*
2259 	 * Set the interrupt throttling rate. Value is calculated
2260 	 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
2261 	 */
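	/*
	 * E.g. assuming the driver's usual MAX_INTS_PER_SEC of 8000:
	 * 10^9 / (8000 * 256) ~= 488, i.e. one interrupt per 125 us.
	 */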
2262 	IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);
2263 
2264 	rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
2265 	rxcsum &= ~IGC_RXCSUM_PCSD;
2266 
2267 	if (sc->sc_nqueues > 1)
2268 		rxcsum |= IGC_RXCSUM_PCSD;
2269 
2270 	IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
2271 
2272 	if (sc->sc_nqueues > 1)
2273 		igc_initialize_rss_mapping(sc);
2274 
2275 	/* Set maximum packet buffer len */
2276 	srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
2277 	    IGC_SRRCTL_BSIZEPKT_SHIFT;
2278 	/* The SRRCTL size above overrides this; set a sane value anyway. */
2279 	rctl |= IGC_RCTL_SZ_2048;
2280 
2281 	/*
2282 	 * If TX flow control is disabled and there's > 1 queue defined,
2283 	 * enable DROP.
2284 	 *
2285 	 * This drops frames rather than hanging the RX MAC for all queues.
2286 	 */
2287 	if ((sc->sc_nqueues > 1) && (sc->fc == igc_fc_none ||
2288 	    sc->fc == igc_fc_rx_pause)) {
2289 		srrctl |= IGC_SRRCTL_DROP_EN;
2290 	}
2291 
2292 	/* Setup the Base and Length of the RX descriptor rings. */
2293 	for (i = 0; i < sc->sc_nqueues; i++, rxr++) {
2294 		uint64_t bus_addr = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2295 		uint32_t rxdctl;
2296 		IGC_WRITE_REG(hw, IGC_RXDCTL(i), 0);
2297 
2298 		srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
2299 
2300 		IGC_WRITE_REG(hw, IGC_RDLEN(i),
2301 		    sc->num_rx_desc * sizeof(union igc_adv_rx_desc));
2302 		IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32));
2303 		IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr);
2304 		IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
2305 
2306 		/* Setup the Head and Tail Descriptor Pointers */
2307 		IGC_WRITE_REG(hw, IGC_RDH(i), 0);
2308 		IGC_WRITE_REG(hw, IGC_RDT(i), 0);
2309 
2310 		/* Enable this Queue */
2311 		rxdctl = IGC_READ_REG(hw, IGC_RXDCTL(i));
2312 		rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
2313 		rxdctl &= 0xFFF00000;
2314 		rxdctl |= IGC_RX_PTHRESH;
2315 		rxdctl |= IGC_RX_HTHRESH << 8;
2316 		rxdctl |= IGC_RX_WTHRESH << 16;
2317 		IGC_WRITE_REG(hw, IGC_RXDCTL(i), rxdctl);
2318 	}
2319 
2320 	/* Make sure VLAN Filters are off */
2321 	rctl &= ~IGC_RCTL_VFE;
2322 
2323 	/* Write out the settings */
2324 	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
2325 }
2326 
2327 /*********************************************************************
2328  *
2329  *  Free all receive rings.
2330  *
2331  **********************************************************************/
2332 void
2333 igc_free_receive_structures(struct igc_softc *sc)
2334 {
2335 	struct rx_ring *rxr;
2336 	int i;
2337 
2338 	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
2339 		if_rxr_init(&rxr->rx_ring, 0, 0);
2340 
2341 	for (i = 0, rxr = sc->rx_rings; i < sc->sc_nqueues; i++, rxr++)
2342 		igc_free_receive_buffers(rxr);
2343 }
2344 
2345 /*********************************************************************
2346  *
2347  *  Free receive ring data structures
2348  *
2349  **********************************************************************/
2350 void
2351 igc_free_receive_buffers(struct rx_ring *rxr)
2352 {
2353 	struct igc_softc *sc = rxr->sc;
2354 	struct igc_rx_buf *rxbuf;
2355 	int i;
2356 
2357 	if (rxr->rx_buffers != NULL) {
2358 		for (i = 0; i < sc->num_rx_desc; i++) {
2359 			rxbuf = &rxr->rx_buffers[i];
2360 			if (rxbuf->buf != NULL) {
2361 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2362 				    0, rxbuf->map->dm_mapsize,
2363 				    BUS_DMASYNC_POSTREAD);
2364 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2365 				    rxbuf->map);
2366 				m_freem(rxbuf->buf);
2367 				rxbuf->buf = NULL;
2368 			}
2369 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2370 			rxbuf->map = NULL;
2371 		}
2372 		free(rxr->rx_buffers, M_DEVBUF,
2373 		    sc->num_rx_desc * sizeof(struct igc_rx_buf));
2374 		rxr->rx_buffers = NULL;
2375 	}
2376 }
2377 
2378 /*
2379  * Initialise the RSS mapping for NICs that support multiple transmit/
2380  * receive rings.
2381  */
2382 void
2383 igc_initialize_rss_mapping(struct igc_softc *sc)
2384 {
2385 	struct igc_hw *hw = &sc->hw;
2386 	uint32_t rss_key[10], mrqc, reta, shift = 0;
2387 	int i, queue_id;
2388 
2389 	/*
2390 	 * The redirection table controls which destination
2391 	 * queue each bucket redirects traffic to.
2392 	 * Each DWORD represents four queues, with the LSB
2393 	 * being the first queue in the DWORD.
2394 	 *
2395 	 * This just allocates buckets to queues using round-robin
2396 	 * allocation.
2397 	 *
2398 	 * NOTE: this happens to match the default RSS
2399 	 * allocation method.
2400 	 */
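	/*
	 * Worked example: the loop fills each RETA dword LSB-first, so
	 * with two queues every register holds 0x01000100 and with four
	 * queues 0x03020100 (queue of bucket n lands in byte n % 4).
	 */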
2401 
2402 	/* Fill the 128-entry table, one register write per four entries. */
2403 	reta = 0;
2404 	for (i = 0; i < 128; i++) {
2405 		queue_id = (i % sc->sc_nqueues);
2406 		/* Adjust if required */
2407 		queue_id = queue_id << shift;
2408 
2409 		/*
2410 		 * The low 8 bits are for hash value (n+0);
2411 		 * The next 8 bits are for hash value (n+1), etc.
2412 		 */
2413 		reta >>= 8;
2414 		reta |= ((uint32_t)queue_id) << 24;
2415 		if ((i & 3) == 3) {
2416 			IGC_WRITE_REG(hw, IGC_RETA(i >> 2), reta);
2417 			reta = 0;
2418 		}
2419 	}
2420 
2421 	/*
2422 	 * MRQC: Multiple Receive Queues Command
2423 	 * Set queuing to RSS control, number depends on the device.
2424 	 */
2425 	mrqc = IGC_MRQC_ENABLE_RSS_4Q;
2426 
2427 	/* Load the stack's Toeplitz key so hw and software hash alike. */
2428 	stoeplitz_to_key(&rss_key, sizeof(rss_key));
2429 
2430 	/* Now fill our hash function seeds */
2431 	for (i = 0; i < 10; i++)
2432 		IGC_WRITE_REG_ARRAY(hw, IGC_RSSRK(0), i, rss_key[i]);
2433 
2434 	/*
2435 	 * Configure the RSS fields to hash upon.
2436 	 */
2437 	mrqc |= (IGC_MRQC_RSS_FIELD_IPV4 | IGC_MRQC_RSS_FIELD_IPV4_TCP);
2438 	mrqc |= (IGC_MRQC_RSS_FIELD_IPV6 | IGC_MRQC_RSS_FIELD_IPV6_TCP);
2439 	mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
2440 
2441 	IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
2442 }
2443 
2444 /*
2445  * igc_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
2446  * For ASF and Pass Through versions of f/w this means
2447  * that the driver is loaded. For AMT versions of the f/w
2448  * this means that the network i/f is open.
2449  */
2450 void
2451 igc_get_hw_control(struct igc_softc *sc)
2452 {
2453 	uint32_t ctrl_ext;
2454 
2455 	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
2456 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
2457 }
2458 
2459 /*
2460  * igc_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
2461  * For ASF and Pass Through versions of f/w this means that
2462  * the driver is no longer loaded. For AMT versions of the
2463  * f/w this means that the network i/f is closed.
2464  */
2465 void
2466 igc_release_hw_control(struct igc_softc *sc)
2467 {
2468 	uint32_t ctrl_ext;
2469 
2470 	ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
2471 	IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
2472 }
2473 
2474 int
2475 igc_is_valid_ether_addr(uint8_t *addr)
2476 {
2477 	char zero_addr[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
2478 
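	/* Reject multicast/broadcast (I/G bit set) and all-zero addresses. */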
2479 	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
2480 		return 0;
2481 	}
2482 
2483 	return 1;
2484 }
2485