1 /*	$OpenBSD: if_ix.c,v 1.152 2017/06/22 02:44:37 deraadt Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36 
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 /* char ixgbe_driver_version[] = "2.5.13"; */
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by ixgbe_probe() to match supported adapters
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
88 };
89 
90 /*********************************************************************
91  *  Function prototypes
92  *********************************************************************/
93 int	ixgbe_probe(struct device *, void *, void *);
94 void	ixgbe_attach(struct device *, struct device *, void *);
95 int	ixgbe_detach(struct device *, int);
96 void	ixgbe_start(struct ifqueue *);
97 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
98 int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
99 void	ixgbe_watchdog(struct ifnet *);
100 void	ixgbe_init(void *);
101 void	ixgbe_stop(void *);
102 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
103 int	ixgbe_media_change(struct ifnet *);
104 void	ixgbe_identify_hardware(struct ix_softc *);
105 int	ixgbe_allocate_pci_resources(struct ix_softc *);
106 int	ixgbe_allocate_legacy(struct ix_softc *);
107 int	ixgbe_allocate_queues(struct ix_softc *);
108 void	ixgbe_free_pci_resources(struct ix_softc *);
109 void	ixgbe_local_timer(void *);
110 void	ixgbe_setup_interface(struct ix_softc *);
111 void	ixgbe_config_gpie(struct ix_softc *);
112 void	ixgbe_config_delay_values(struct ix_softc *);
113 void	ixgbe_add_media_types(struct ix_softc *);
114 void	ixgbe_config_link(struct ix_softc *);
115 
116 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
117 int	ixgbe_setup_transmit_structures(struct ix_softc *);
118 int	ixgbe_setup_transmit_ring(struct tx_ring *);
119 void	ixgbe_initialize_transmit_units(struct ix_softc *);
120 void	ixgbe_free_transmit_structures(struct ix_softc *);
121 void	ixgbe_free_transmit_buffers(struct tx_ring *);
122 
123 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
124 int	ixgbe_setup_receive_structures(struct ix_softc *);
125 int	ixgbe_setup_receive_ring(struct rx_ring *);
126 void	ixgbe_initialize_receive_units(struct ix_softc *);
127 void	ixgbe_free_receive_structures(struct ix_softc *);
128 void	ixgbe_free_receive_buffers(struct rx_ring *);
129 void	ixgbe_initialize_rss_mapping(struct ix_softc *);
130 int	ixgbe_rxfill(struct rx_ring *);
131 void	ixgbe_rxrefill(void *);
132 
133 void	ixgbe_enable_intr(struct ix_softc *);
134 void	ixgbe_disable_intr(struct ix_softc *);
135 void	ixgbe_update_stats_counters(struct ix_softc *);
136 int	ixgbe_txeof(struct tx_ring *);
137 int	ixgbe_rxeof(struct ix_queue *);
138 void	ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
139 void	ixgbe_iff(struct ix_softc *);
140 #ifdef IX_DEBUG
141 void	ixgbe_print_hw_stats(struct ix_softc *);
142 #endif
143 void	ixgbe_update_link_status(struct ix_softc *);
144 int	ixgbe_get_buf(struct rx_ring *, int);
145 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
146 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
147 		    struct ixgbe_dma_alloc *, int);
148 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
149 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
150 	    uint32_t *);
151 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
152 	    uint32_t *);
153 void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
154 void	ixgbe_configure_ivars(struct ix_softc *);
155 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
156 
157 void	ixgbe_setup_vlan_hw_support(struct ix_softc *);
158 
159 /* Support for pluggable optic modules */
160 void	ixgbe_setup_optics(struct ix_softc *);
161 void	ixgbe_handle_mod(struct ix_softc *);
162 void	ixgbe_handle_msf(struct ix_softc *);
163 void	ixgbe_handle_phy(struct ix_softc *);
164 
165 /* Legacy (single vector) interrupt handler */
166 int	ixgbe_intr(void *);
167 void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
168 void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
169 void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);
170 
171 /*********************************************************************
172  *  OpenBSD Device Interface Entry Points
173  *********************************************************************/
174 
175 struct cfdriver ix_cd = {
176 	NULL, "ix", DV_IFNET
177 };
178 
179 struct cfattach ix_ca = {
180 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
181 };
182 
183 int ixgbe_smart_speed = ixgbe_smart_speed_on;
184 
185 /*********************************************************************
186  *  Device identification routine
187  *
188  *  ixgbe_probe determines if the driver should be loaded on the
189  *  adapter, based on its PCI vendor/device ID.
190  *
191  *  return nonzero on a supported device, zero otherwise
192  *********************************************************************/
193 
194 int
195 ixgbe_probe(struct device *parent, void *match, void *aux)
196 {
197 	INIT_DEBUGOUT("ixgbe_probe: begin");
198 
199 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
200 	    nitems(ixgbe_devices)));
201 }
202 
203 /*********************************************************************
204  *  Device initialization routine
205  *
206  *  The attach entry point is called when the driver is being loaded.
207  *  This routine identifies the type of hardware, allocates all resources
208  *  and initializes the hardware.
209  *
211  *********************************************************************/
212 
213 void
214 ixgbe_attach(struct device *parent, struct device *self, void *aux)
215 {
216 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
217 	struct ix_softc		*sc = (struct ix_softc *)self;
218 	int			 error = 0;
219 	uint16_t		 csum;
220 	uint32_t			 ctrl_ext;
221 	struct ixgbe_hw		*hw = &sc->hw;
222 
223 	INIT_DEBUGOUT("ixgbe_attach: begin");
224 
225 	sc->osdep.os_sc = sc;
226 	sc->osdep.os_pa = *pa;
227 
228 	/* Set up the timer callout */
229 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
230 	timeout_set(&sc->rx_refill, ixgbe_rxrefill, sc);
231 
232 	/* Determine hardware revision */
233 	ixgbe_identify_hardware(sc);
234 
235 	/* Set the default number of TX and RX descriptors */
236 	sc->num_tx_desc = DEFAULT_TXD;
237 	sc->num_rx_desc = DEFAULT_RXD;
238 
239 	/* Do base PCI setup - map BAR0 */
240 	if (ixgbe_allocate_pci_resources(sc))
241 		goto err_out;
242 
243 	/* Allocate our TX/RX Queues */
244 	if (ixgbe_allocate_queues(sc))
245 		goto err_out;
246 
247 	/* Allocate multicast array memory. */
248 	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
249 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
250 	if (sc->mta == NULL) {
251 		printf(": Can not allocate multicast setup array\n");
252 		goto err_late;
253 	}
254 
255 	/* Initialize the shared code */
256 	error = ixgbe_init_shared_code(hw);
257 	if (error) {
258 		printf(": Unable to initialize the shared code\n");
259 		goto err_late;
260 	}
261 
262 	/* Make sure we have a good EEPROM before we read from it */
263 	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
264 		printf(": The EEPROM Checksum Is Not Valid\n");
265 		goto err_late;
266 	}
267 
268 	error = ixgbe_init_hw(hw);
269 	if (error == IXGBE_ERR_EEPROM_VERSION) {
270 		printf(": This device is a pre-production adapter/"
271 		    "LOM.  Please be aware there may be issues associated "
272 		    "with your hardware.\nIf you are experiencing problems "
273 		    "please contact your Intel or hardware representative "
274 		    "who provided you with this hardware.\n");
275 	} else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
276 	    error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
277 		printf(": Hardware Initialization Failure\n");
278 		goto err_late;
279 	}
280 
281 	/* Detect and set physical type */
282 	ixgbe_setup_optics(sc);
283 
284 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
285 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
286 
287 	error = ixgbe_allocate_legacy(sc);
288 	if (error)
289 		goto err_late;
290 
291 	/* Enable the optics for 82599 SFP+ fiber */
292 	if (sc->hw.mac.ops.enable_tx_laser)
293 		sc->hw.mac.ops.enable_tx_laser(&sc->hw);
294 
295 	/* Enable power to the phy */
296 	if (hw->phy.ops.set_phy_power)
297 		hw->phy.ops.set_phy_power(&sc->hw, TRUE);
298 
299 	/* Setup OS specific network interface */
300 	ixgbe_setup_interface(sc);
301 
302 	/* Initialize statistics */
303 	ixgbe_update_stats_counters(sc);
304 
305 	/* Get the PCI-E bus info and determine LAN ID */
306 	hw->mac.ops.get_bus_info(hw);
307 
308 	/* Set an initial default flow control value */
309 	sc->fc = ixgbe_fc_full;
310 
311 	/* let hardware know driver is loaded */
312 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
313 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
314 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
315 
316 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
317 
318 	INIT_DEBUGOUT("ixgbe_attach: end");
319 	return;
320 
321 err_late:
322 	ixgbe_free_transmit_structures(sc);
323 	ixgbe_free_receive_structures(sc);
324 err_out:
325 	ixgbe_free_pci_resources(sc);
326 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
327 	    MAX_NUM_MULTICAST_ADDRESSES);
328 }
329 
330 /*********************************************************************
331  *  Device removal routine
332  *
333  *  The detach entry point is called when the driver is being removed.
334  *  This routine stops the adapter and deallocates all the resources
335  *  that were allocated for driver operation.
336  *
337  *  return 0 on success, positive on failure
338  *********************************************************************/
339 
340 int
341 ixgbe_detach(struct device *self, int flags)
342 {
343 	struct ix_softc *sc = (struct ix_softc *)self;
344 	struct ifnet *ifp = &sc->arpcom.ac_if;
345 	uint32_t	ctrl_ext;
346 
347 	INIT_DEBUGOUT("ixgbe_detach: begin");
348 
349 	ixgbe_stop(sc);
350 
351 	/* let hardware know driver is unloading */
352 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
353 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
354 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
355 
356 	ether_ifdetach(ifp);
357 	if_detach(ifp);
358 
359 	timeout_del(&sc->timer);
360 	timeout_del(&sc->rx_refill);
361 	ixgbe_free_pci_resources(sc);
362 
363 	ixgbe_free_transmit_structures(sc);
364 	ixgbe_free_receive_structures(sc);
365 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
366 	    MAX_NUM_MULTICAST_ADDRESSES);
367 
368 	return (0);
369 }
370 
371 /*********************************************************************
372  *  Transmit entry point
373  *
374  *  ixgbe_start is called by the stack to initiate a transmit.
375  *  The driver will remain in this routine as long as there are
376  *  packets to transmit and transmit resources are available.
377  *  In case resources are not available, the stack is notified and
378  *  the packets are left on the interface queue.
379  **********************************************************************/
380 
381 void
382 ixgbe_start(struct ifqueue *ifq)
383 {
384 	struct ifnet		*ifp = ifq->ifq_if;
385 	struct ix_softc		*sc = ifp->if_softc;
386 	struct tx_ring		*txr = sc->tx_rings;
387 	struct mbuf  		*m_head;
388 	int			 post = 0;
389 
390 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(ifq))
391 		return;
392 	if (!sc->link_up)
393 		return;
394 
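	/* Make the hardware's latest descriptor writes visible to the CPU. */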
395 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
396 	    txr->txdma.dma_map->dm_mapsize,
397 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
398 
399 	for (;;) {
400 		/* Check that we have the minimal number of TX descriptors. */
401 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
402 			ifq_set_oactive(ifq);
403 			break;
404 		}
405 
406 		m_head = ifq_dequeue(ifq);
407 		if (m_head == NULL)
408 			break;
409 
410 		if (ixgbe_encap(txr, m_head)) {
411 			m_freem(m_head);
412 			continue;
413 		}
414 
415 #if NBPFILTER > 0
416 		if (ifp->if_bpf)
417 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
418 #endif
419 
420 		/* Set timeout in case hardware has problems transmitting */
421 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
422 		ifp->if_timer = IXGBE_TX_TIMEOUT;
423 
424 		post = 1;
425 	}
426 
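	/* Flush our descriptor updates before handing the ring to the chip. */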
427 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
428 	    0, txr->txdma.dma_map->dm_mapsize,
429 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
430 
431 	/*
432 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
433 	 * hardware that these frames are available to transmit.
434 	 */
435 	if (post)
436 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
437 		    txr->next_avail_desc);
438 }
439 
440 /*********************************************************************
441  *  Ioctl entry point
442  *
443  *  ixgbe_ioctl is called when the user wants to configure the
444  *  interface.
445  *
446  *  return 0 on success, positive on failure
447  **********************************************************************/
448 
449 int
450 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
451 {
452 	struct ix_softc	*sc = ifp->if_softc;
453 	struct ifreq	*ifr = (struct ifreq *) data;
454 	int		s, error = 0;
455 
456 	s = splnet();
457 
458 	switch (command) {
459 	case SIOCSIFADDR:
460 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
461 		ifp->if_flags |= IFF_UP;
462 		if (!(ifp->if_flags & IFF_RUNNING))
463 			ixgbe_init(sc);
464 		break;
465 
466 	case SIOCSIFFLAGS:
467 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
468 		if (ifp->if_flags & IFF_UP) {
469 			if (ifp->if_flags & IFF_RUNNING)
470 				error = ENETRESET;
471 			else
472 				ixgbe_init(sc);
473 		} else {
474 			if (ifp->if_flags & IFF_RUNNING)
475 				ixgbe_stop(sc);
476 		}
477 		break;
478 
479 	case SIOCSIFMEDIA:
480 	case SIOCGIFMEDIA:
481 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
482 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
483 		break;
484 
485 	case SIOCGIFRXR:
486 		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
487 		break;
488 
489 	default:
490 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
491 	}
492 
493 	if (error == ENETRESET) {
494 		if (ifp->if_flags & IFF_RUNNING) {
495 			ixgbe_disable_intr(sc);
496 			ixgbe_iff(sc);
497 			ixgbe_enable_intr(sc);
498 		}
499 		error = 0;
500 	}
501 
502 	splx(s);
503 	return (error);
504 }
505 
506 int
507 ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
508 {
509 	struct if_rxring_info *ifr, ifr1;
510 	struct rx_ring *rxr;
511 	int error, i;
512 	u_int n = 0;
513 
514 	if (sc->num_queues > 1) {
515 		if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
516 		    M_WAITOK | M_ZERO)) == NULL)
517 			return (ENOMEM);
518 	} else
519 		ifr = &ifr1;
520 
521 	for (i = 0; i < sc->num_queues; i++) {
522 		rxr = &sc->rx_rings[i];
523 		ifr[n].ifr_size = sc->rx_mbuf_sz;
524 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
525 		ifr[n].ifr_info = rxr->rx_ring;
526 		n++;
527 	}
528 
529 	error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
530 
531 	if (sc->num_queues > 1)
532 		free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
533 	return (error);
534 }
535 
536 /*********************************************************************
537  *  Watchdog entry point
538  *
539  **********************************************************************/
540 
541 void
542 ixgbe_watchdog(struct ifnet * ifp)
543 {
544 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
545 	struct tx_ring *txr = sc->tx_rings;
546 	struct ixgbe_hw *hw = &sc->hw;
547 	int		tx_hang = FALSE;
548 	int		i;
549 
550 	/*
551 	 * The timer is set to 5 every time ixgbe_start() queues a packet.
552 	 * Anytime all descriptors are clean the timer is set to 0.
553 	 */
554 	for (i = 0; i < sc->num_queues; i++, txr++) {
555 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
556 			continue;
557 		else {
558 			tx_hang = TRUE;
559 			break;
560 		}
561 	}
562 	if (tx_hang == FALSE)
563 		return;
564 
565 	/*
566 	 * If we are in this routine because of pause frames, then don't
567 	 * reset the hardware.
568 	 */
569 	if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
570 		for (i = 0; i < sc->num_queues; i++, txr++)
571 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
572 		ifp->if_timer = IXGBE_TX_TIMEOUT;
573 		return;
574 	}
575 
577 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
578 	for (i = 0; i < sc->num_queues; i++, txr++) {
579 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
580 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
581 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
582 		printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
583 		    i, txr->tx_avail, txr->next_to_clean);
584 	}
585 	ifp->if_flags &= ~IFF_RUNNING;
586 	sc->watchdog_events++;
587 
588 	ixgbe_init(sc);
589 }
590 
591 /*********************************************************************
592  *  Init entry point
593  *
594  *  This routine is used in two ways. It is used by the stack as the
595  *  init entry point in the network interface structure. It is also used
596  *  by the driver as a hw/sw initialization routine to get to a
597  *  consistent state.
598  *
600  **********************************************************************/
601 #define IXGBE_MHADD_MFS_SHIFT 16
602 
603 void
604 ixgbe_init(void *arg)
605 {
606 	struct ix_softc	*sc = (struct ix_softc *)arg;
607 	struct ifnet	*ifp = &sc->arpcom.ac_if;
608 	struct rx_ring	*rxr = sc->rx_rings;
609 	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, itr;
610 	int		 i, s, err;
611 
612 	INIT_DEBUGOUT("ixgbe_init: begin");
613 
614 	s = splnet();
615 
616 	ixgbe_stop(sc);
617 
618 	/* reprogram the RAR[0] in case user changed it. */
619 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
620 
621 	/* Get the latest MAC address; the user may have set a LAA */
622 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
623 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
624 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
625 	sc->hw.addr_ctrl.rar_used_count = 1;
626 
627 	/* Prepare transmit descriptors and buffers */
628 	if (ixgbe_setup_transmit_structures(sc)) {
629 		printf("%s: Could not setup transmit structures\n",
630 		    ifp->if_xname);
631 		ixgbe_stop(sc);
632 		splx(s);
633 		return;
634 	}
635 
636 	ixgbe_init_hw(&sc->hw);
637 	ixgbe_initialize_transmit_units(sc);
638 
639 	/* Use 2k clusters, even for jumbo frames */
640 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
641 
642 	/* Prepare receive descriptors and buffers */
643 	if (ixgbe_setup_receive_structures(sc)) {
644 		printf("%s: Could not setup receive structures\n",
645 		    ifp->if_xname);
646 		ixgbe_stop(sc);
647 		splx(s);
648 		return;
649 	}
650 
651 	/* Configure RX settings */
652 	ixgbe_initialize_receive_units(sc);
653 
654 	/* Enable SDP & MSIX interrupts based on adapter */
655 	ixgbe_config_gpie(sc);
656 
657 	/* Program promiscuous mode and multicast filters. */
658 	ixgbe_iff(sc);
659 
660 	/* Set MRU size */
661 	mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
662 	mhadd &= ~IXGBE_MHADD_MFS_MASK;
663 	mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
664 	IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
665 
666 	/* Now enable all the queues */
667 	for (i = 0; i < sc->num_queues; i++) {
668 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
669 		txdctl |= IXGBE_TXDCTL_ENABLE;
670 		/* Set WTHRESH to 8, burst writeback */
671 		txdctl |= (8 << 16);
672 		/*
673 		 * When the internal queue falls below PTHRESH (16),
674 		 * start prefetching as long as there are at least
675 		 * HTHRESH (1) buffers ready.
676 		 */
677 		txdctl |= (16 << 0) | (1 << 8);
678 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
679 	}
680 
681 	for (i = 0; i < sc->num_queues; i++) {
682 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
683 		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
684 			/*
685 			 * PTHRESH = 21
686 			 * HTHRESH = 4
687 			 * WTHRESH = 8
688 			 */
689 			rxdctl &= ~0x3FFFFF;
690 			rxdctl |= 0x080420;
691 		}
692 		rxdctl |= IXGBE_RXDCTL_ENABLE;
693 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
694 		for (k = 0; k < 10; k++) {
695 			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
696 			    IXGBE_RXDCTL_ENABLE)
697 				break;
698 			else
699 				msec_delay(1);
700 		}
701 		IXGBE_WRITE_FLUSH(&sc->hw);
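		/* Tell the hardware which Rx descriptors have been filled. */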
702 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
703 	}
704 
705 	/* Set up VLAN support and filter */
706 	ixgbe_setup_vlan_hw_support(sc);
707 
708 	/* Enable Receive engine */
709 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
710 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
711 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
712 	rxctrl |= IXGBE_RXCTRL_RXEN;
713 	sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
714 
715 	timeout_add_sec(&sc->timer, 1);
716 
717 	/* Set up MSI/X routing */
718 	if (sc->msix > 1) {
719 		ixgbe_configure_ivars(sc);
720 		/* Set up auto-mask */
721 		if (sc->hw.mac.type == ixgbe_mac_82598EB)
722 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
723 		else {
724 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
725 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
726 		}
727 	} else {  /* Simple settings for Legacy/MSI */
728 		ixgbe_set_ivar(sc, 0, 0, 0);
729 		ixgbe_set_ivar(sc, 0, 0, 1);
730 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
731 	}
732 
733 	/* Check on any SFP devices that need to be kick-started */
734 	if (sc->hw.phy.type == ixgbe_phy_none) {
735 		err = sc->hw.phy.ops.identify(&sc->hw);
736 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
737 			printf("Unsupported SFP+ module type was detected.\n");
738 			splx(s);
739 			return;
740 		}
741 	}
742 
743 	/* Setup interrupt moderation */
744 	itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
745 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
746 		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
747 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
748 
749 	/* Enable power to the phy */
750 	if (sc->hw.phy.ops.set_phy_power)
751 		sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
752 
753 	/* Config/Enable Link */
754 	ixgbe_config_link(sc);
755 
756 	/* Hardware Packet Buffer & Flow Control setup */
757 	ixgbe_config_delay_values(sc);
758 
759 	/* Initialize the FC settings */
760 	sc->hw.mac.ops.start_hw(&sc->hw);
761 
762 	/* And now turn on interrupts */
763 	ixgbe_enable_intr(sc);
764 
765 	/* Now inform the stack we're ready */
766 	ifp->if_flags |= IFF_RUNNING;
767 	ifq_clr_oactive(&ifp->if_snd);
768 
769 	splx(s);
770 }
771 
772 void
773 ixgbe_config_gpie(struct ix_softc *sc)
774 {
775 	struct ixgbe_hw	*hw = &sc->hw;
776 	uint32_t gpie;
777 
778 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
779 
780 	/* Fan Failure Interrupt */
781 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
782 		gpie |= IXGBE_SDP1_GPIEN;
783 
784 	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
785 		/* Add for Module detection */
786 		gpie |= IXGBE_SDP2_GPIEN;
787 
788 		/* Media ready */
789 		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
790 			gpie |= IXGBE_SDP1_GPIEN;
791 
792 		/*
793 		 * Set LL interval to max to reduce the number of low latency
794 		 * interrupts hitting the card when the ring is getting full.
795 		 */
796 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
797 	}
798 
799 	if (sc->hw.mac.type == ixgbe_mac_X540 ||
800 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
801 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
802 		/*
803 		 * Thermal Failure Detection (X540)
804 		 * Link Detection (X552 SFP+, X552/X557-AT)
805 		 */
806 		gpie |= IXGBE_SDP0_GPIEN_X540;
807 
808 		/*
809 		 * Set LL interval to max to reduce the number of low latency
810 		 * interrupts hitting the card when the ring is getting full.
811 		 */
812 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
813 	}
814 
815 	if (sc->msix > 1) {
816 		/* Enable Enhanced MSIX mode */
817 		gpie |= IXGBE_GPIE_MSIX_MODE;
818 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
819 		    IXGBE_GPIE_OCD;
820 	}
821 
822 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
823 }
824 
825 /*
826  * Requires sc->max_frame_size to be set.
827  */
828 void
829 ixgbe_config_delay_values(struct ix_softc *sc)
830 {
831 	struct ixgbe_hw *hw = &sc->hw;
832 	uint32_t rxpb, frame, size, tmp;
833 
834 	frame = sc->max_frame_size;
835 
836 	/* Calculate High Water */
837 	switch (hw->mac.type) {
838 	case ixgbe_mac_X540:
839 	case ixgbe_mac_X550:
840 	case ixgbe_mac_X550EM_x:
841 		tmp = IXGBE_DV_X540(frame, frame);
842 		break;
843 	default:
844 		tmp = IXGBE_DV(frame, frame);
845 		break;
846 	}
847 	size = IXGBE_BT2KB(tmp);
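	/* High water: Rx packet buffer size (KB) less the delay value. */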
848 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
849 	hw->fc.high_water[0] = rxpb - size;
850 
851 	/* Now calculate Low Water */
852 	switch (hw->mac.type) {
853 	case ixgbe_mac_X540:
854 	case ixgbe_mac_X550:
855 	case ixgbe_mac_X550EM_x:
856 		tmp = IXGBE_LOW_DV_X540(frame);
857 		break;
858 	default:
859 		tmp = IXGBE_LOW_DV(frame);
860 		break;
861 	}
862 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
863 
864 	hw->fc.requested_mode = sc->fc;
865 	hw->fc.pause_time = IXGBE_FC_PAUSE;
866 	hw->fc.send_xon = TRUE;
867 }
868 
869 /*
870  * MSIX Interrupt Handlers
871  */
872 void
873 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
874 {
875 	uint64_t queue = 1ULL << vector;
876 	uint32_t mask;
877 
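	/*
	 * The 82598 has a single EIMS register; newer MACs spread the
	 * 64 vector bits across EIMS_EX(0) and EIMS_EX(1).
	 */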
878 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
879 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
880 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
881 	} else {
882 		mask = (queue & 0xFFFFFFFF);
883 		if (mask)
884 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
885 		mask = (queue >> 32);
886 		if (mask)
887 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
888 	}
889 }
890 
891 void
892 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
893 {
894 	uint64_t queue = 1ULL << vector;
895 	uint32_t mask;
896 
897 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
898 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
899 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
900 	} else {
901 		mask = (queue & 0xFFFFFFFF);
902 		if (mask)
903 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
904 		mask = (queue >> 32);
905 		if (mask)
906 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
907 	}
908 }
909 
910 /*********************************************************************
911  *
912  *  Legacy Interrupt Service routine
913  *
914  **********************************************************************/
915 
916 int
917 ixgbe_intr(void *arg)
918 {
919 	struct ix_softc	*sc = (struct ix_softc *)arg;
920 	struct ix_queue *que = sc->queues;
921 	struct ifnet	*ifp = &sc->arpcom.ac_if;
922 	struct tx_ring	*txr = sc->tx_rings;
923 	struct ixgbe_hw	*hw = &sc->hw;
924 	uint32_t	 reg_eicr, mod_mask, msf_mask;
925 	int		 i, refill = 0;
926 
927 	reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
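	/* A zero cause register means the interrupt was not ours. */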
928 	if (reg_eicr == 0) {
929 		ixgbe_enable_intr(sc);
930 		return (0);
931 	}
932 
933 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
934 		ixgbe_rxeof(que);
935 		ixgbe_txeof(txr);
936 		refill = 1;
937 	}
938 
939 	if (refill) {
940 		if (ixgbe_rxfill(que->rxr)) {
941 			/* Advance the Rx Queue "Tail Pointer" */
942 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
943 			    que->rxr->last_desc_filled);
944 		} else
945 			timeout_add(&sc->rx_refill, 1);
946 	}
947 
948 	/* Link status change */
949 	if (reg_eicr & IXGBE_EICR_LSC) {
950 		KERNEL_LOCK();
951 		ixgbe_update_link_status(sc);
952 		KERNEL_UNLOCK();
953 		ifq_start(&ifp->if_snd);
954 	}
955 
956 	if (hw->mac.type != ixgbe_mac_82598EB) {
957 		if (reg_eicr & IXGBE_EICR_ECC) {
958 			printf("%s: CRITICAL: ECC ERROR!! "
959 			    "Please Reboot!!\n", sc->dev.dv_xname);
960 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
961 		}
962 		/* Check for over temp condition */
963 		if (reg_eicr & IXGBE_EICR_TS) {
964 			printf("%s: CRITICAL: OVER TEMP!! "
965 			    "PHY IS SHUT DOWN!!\n", ifp->if_xname);
966 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
967 		}
968 	}
969 
970 	/* Pluggable optics-related interrupt */
971 	if (ixgbe_is_sfp(hw)) {
972 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
973 			mod_mask = IXGBE_EICR_GPI_SDP0_X540;
974 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
975 		} else if (hw->mac.type == ixgbe_mac_X540 ||
976 		    hw->mac.type == ixgbe_mac_X550 ||
977 		    hw->mac.type == ixgbe_mac_X550EM_x) {
978 			mod_mask = IXGBE_EICR_GPI_SDP2_X540;
979 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
980 		} else {
981 			mod_mask = IXGBE_EICR_GPI_SDP2;
982 			msf_mask = IXGBE_EICR_GPI_SDP1;
983 		}
984 		if (reg_eicr & mod_mask) {
985 			/* Clear the interrupt */
986 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
987 			KERNEL_LOCK();
988 			ixgbe_handle_mod(sc);
989 			KERNEL_UNLOCK();
990 		} else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
991 		    (reg_eicr & msf_mask)) {
992 			/* Clear the interrupt */
993 			IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
994 			KERNEL_LOCK();
995 			ixgbe_handle_msf(sc);
996 			KERNEL_UNLOCK();
997 		}
998 	}
999 
1000 	/* Check for fan failure */
1001 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1002 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1003 		printf("%s: CRITICAL: FAN FAILURE!! "
1004 		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1005 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1006 	}
1007 
1008 	/* External PHY interrupt */
1009 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1010 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1011 		/* Clear the interrupt */
1012 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1013 		KERNEL_LOCK();
1014 		ixgbe_handle_phy(sc);
1015 		KERNEL_UNLOCK();
1016 	}
1017 
1018 	for (i = 0; i < sc->num_queues; i++, que++)
1019 		ixgbe_enable_queue(sc, que->msix);
1020 
1021 	return (1);
1022 }
1023 
1024 /*********************************************************************
1025  *
1026  *  Media Ioctl callback
1027  *
1028  *  This routine is called whenever the user queries the status of
1029  *  the interface using ifconfig.
1030  *
1031  **********************************************************************/
1032 void
1033 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1034 {
1035 	struct ix_softc *sc = ifp->if_softc;
1036 
1037 	ifmr->ifm_active = IFM_ETHER;
1038 	ifmr->ifm_status = IFM_AVALID;
1039 
1040 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1041 	ixgbe_update_link_status(sc);
1042 
1043 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1044 		ifmr->ifm_status |= IFM_ACTIVE;
1045 
1046 		switch (sc->link_speed) {
1047 		case IXGBE_LINK_SPEED_100_FULL:
1048 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1049 			break;
1050 		case IXGBE_LINK_SPEED_1GB_FULL:
1051 			switch (sc->optics) {
1052 			case IFM_10G_SR: /* multi-speed fiber */
1053 				ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1054 				break;
1055 			case IFM_10G_LR: /* multi-speed fiber */
1056 				ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1057 				break;
1058 			default:
1059 				ifmr->ifm_active |= sc->optics | IFM_FDX;
1060 				break;
1061 			}
1062 			break;
1063 		case IXGBE_LINK_SPEED_10GB_FULL:
1064 			ifmr->ifm_active |= sc->optics | IFM_FDX;
1065 			break;
1066 		}
1067 
1068 		switch (sc->hw.fc.current_mode) {
1069 		case ixgbe_fc_tx_pause:
1070 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1071 			break;
1072 		case ixgbe_fc_rx_pause:
1073 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1074 			break;
1075 		case ixgbe_fc_full:
1076 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1077 			    IFM_ETH_TXPAUSE;
1078 			break;
1079 		default:
1080 			ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1081 			    IFM_ETH_TXPAUSE);
1082 			break;
1083 		}
1084 	}
1085 }
1086 
1087 /*********************************************************************
1088  *
1089  *  Media Ioctl callback
1090  *
1091  *  This routine is called when the user changes speed/duplex using
1092  *  the media/mediaopt options with ifconfig.
1093  *
1094  **********************************************************************/
1095 int
1096 ixgbe_media_change(struct ifnet *ifp)
1097 {
1098 	struct ix_softc	*sc = ifp->if_softc;
1099 	struct ixgbe_hw	*hw = &sc->hw;
1100 	struct ifmedia	*ifm = &sc->media;
1101 	ixgbe_link_speed speed = 0;
1102 
1103 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1104 		return (EINVAL);
1105 
1106 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1107 		return (ENODEV);
1108 
1109 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1110 		case IFM_AUTO:
1111 		case IFM_10G_T:
1112 			speed |= IXGBE_LINK_SPEED_100_FULL;	/* FALLTHROUGH */
1113 		case IFM_10G_SR: /* KR, too */
1114 		case IFM_10G_LR:
1115 		case IFM_10G_CX4: /* KX4 */
1116 			speed |= IXGBE_LINK_SPEED_1GB_FULL;	/* FALLTHROUGH */
1117 		case IFM_10G_SFP_CU:
1118 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1119 			break;
1120 		case IFM_1000_T:
1121 			speed |= IXGBE_LINK_SPEED_100_FULL;	/* FALLTHROUGH */
1122 		case IFM_1000_LX:
1123 		case IFM_1000_SX:
1124 		case IFM_1000_CX: /* KX */
1125 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1126 			break;
1127 		case IFM_100_TX:
1128 			speed |= IXGBE_LINK_SPEED_100_FULL;
1129 			break;
1130 		default:
1131 			return (EINVAL);
1132 	}
1133 
1134 	hw->mac.autotry_restart = TRUE;
1135 	hw->mac.ops.setup_link(hw, speed, TRUE);
1136 
1137 	return (0);
1138 }
1139 
1140 /*********************************************************************
1141  *
1142  *  This routine maps the mbufs to tx descriptors, allowing the
1143  *  TX engine to transmit the packets.
1144  *  	- return 0 on success, positive on failure
1145  *
1146  **********************************************************************/
1147 
1148 int
1149 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1150 {
1151 	struct ix_softc *sc = txr->sc;
1152 	uint32_t	olinfo_status = 0, cmd_type_len;
1153 	int             i, j, error;
1154 	int		first, last = 0;
1155 	bus_dmamap_t	map;
1156 	struct ixgbe_tx_buf *txbuf;
1157 	union ixgbe_adv_tx_desc *txd = NULL;
1158 
1159 	/* Basic descriptor defines */
1160 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1161 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1162 
1163 #if NVLAN > 0
1164 	if (m_head->m_flags & M_VLANTAG)
1165 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1166 #endif
1167 
1168 	/*
1169 	 * Important to capture the first descriptor
1170 	 * used because it will contain the index of
1171 	 * the one we tell the hardware to report back
1172 	 */
1173 	first = txr->next_avail_desc;
1174 	txbuf = &txr->tx_buffers[first];
1175 	map = txbuf->map;
1176 
1177 	/*
1178 	 * Map the packet for DMA.
1179 	 */
1180 	error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m_head,
1181 	    BUS_DMA_NOWAIT);
1182 	switch (error) {
1183 	case 0:
1184 		break;
1185 	case EFBIG:
1186 		if (m_defrag(m_head, M_NOWAIT) == 0 &&
1187 		    (error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1188 		     m_head, BUS_DMA_NOWAIT)) == 0)
1189 			break;
1190 		/* FALLTHROUGH */
1191 	default:
1192 		sc->no_tx_dma_setup++;
1193 		return (error);
1194 	}
1195 
1196 	/* Make certain there are enough descriptors */
1197 	KASSERT(map->dm_nsegs <= txr->tx_avail - 2);
1198 
1199 	/*
1200 	 * Set the appropriate offload context;
1201 	 * this will become the first descriptor.
1202 	 */
1203 	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1204 	if (error)
1205 		goto xmit_fail;
1206 
1207 	i = txr->next_avail_desc;
1208 	for (j = 0; j < map->dm_nsegs; j++) {
1209 		txbuf = &txr->tx_buffers[i];
1210 		txd = &txr->tx_base[i];
1211 
1212 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1213 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1214 		    cmd_type_len | map->dm_segs[j].ds_len);
1215 		txd->read.olinfo_status = htole32(olinfo_status);
1216 		last = i; /* descriptor that will get completion IRQ */
1217 
1218 		if (++i == sc->num_tx_desc)
1219 			i = 0;
1220 
1221 		txbuf->m_head = NULL;
1222 		txbuf->eop_index = -1;
1223 	}
1224 
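	/* EOP ends the packet; RS requests a completion write-back. */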
1225 	txd->read.cmd_type_len |=
1226 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1227 
1228 	txbuf->m_head = m_head;
1229 	/*
1230 	 * Here we swap the map so the last descriptor,
1231 	 * which gets the completion interrupt, has the
1232 	 * real map, and the first descriptor gets the
1233 	 * unused map from this descriptor.
1234 	 */
1235 	txr->tx_buffers[first].map = txbuf->map;
1236 	txbuf->map = map;
1237 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1238 	    BUS_DMASYNC_PREWRITE);
1239 
1240 	/* Set the index of the descriptor that will be marked done */
1241 	txbuf = &txr->tx_buffers[first];
1242 	txbuf->eop_index = last;
1243 
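	/* Publish the descriptor writes before updating the producer index. */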
1244 	membar_producer();
1245 
1246 	atomic_sub_int(&txr->tx_avail, map->dm_nsegs);
1247 	txr->next_avail_desc = i;
1248 
1249 	++txr->tx_packets;
1250 	return (0);
1251 
1252 xmit_fail:
1253 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1254 	return (error);
1255 }
1256 
1257 void
1258 ixgbe_iff(struct ix_softc *sc)
1259 {
1260 	struct ifnet *ifp = &sc->arpcom.ac_if;
1261 	struct arpcom *ac = &sc->arpcom;
1262 	uint32_t	fctrl;
1263 	uint8_t	*mta;
1264 	uint8_t	*update_ptr;
1265 	struct ether_multi *enm;
1266 	struct ether_multistep step;
1267 	int	mcnt = 0;
1268 
1269 	IOCTL_DEBUGOUT("ixgbe_iff: begin");
1270 
1271 	mta = sc->mta;
1272 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1273 	    MAX_NUM_MULTICAST_ADDRESSES);
1274 
1275 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1276 	fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1277 	ifp->if_flags &= ~IFF_ALLMULTI;
1278 
1279 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1280 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1281 		ifp->if_flags |= IFF_ALLMULTI;
1282 		fctrl |= IXGBE_FCTRL_MPE;
1283 		if (ifp->if_flags & IFF_PROMISC)
1284 			fctrl |= IXGBE_FCTRL_UPE;
1285 	} else {
1286 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1287 		while (enm != NULL) {
1288 			bcopy(enm->enm_addrlo,
1289 			    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1290 			    IXGBE_ETH_LENGTH_OF_ADDRESS);
1291 			mcnt++;
1292 
1293 			ETHER_NEXT_MULTI(step, enm);
1294 		}
1295 
1296 		update_ptr = mta;
1297 		sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1298 		    ixgbe_mc_array_itr, TRUE);
1299 	}
1300 
1301 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1302 }
1303 
1304 /*
1305  * This is an iterator function needed by the multicast shared code.
1306  * It simply feeds the shared code routine the addresses collected
1307  * by ixgbe_iff(), one at a time.
1308  */
1309 uint8_t *
1310 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1311 {
1312 	uint8_t *addr = *update_ptr;
1313 	uint8_t *newptr;
1314 	*vmdq = 0;
1315 
1316 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1317 	*update_ptr = newptr;
1318 	return addr;
1319 }
1320 
1321 void
1322 ixgbe_local_timer(void *arg)
1323 {
1324 	struct ix_softc *sc = arg;
1325 #ifdef IX_DEBUG
1326 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1327 #endif
1328 	int		 s;
1329 
1330 	s = splnet();
1331 
1332 	ixgbe_update_stats_counters(sc);
1333 
1334 #ifdef IX_DEBUG
1335 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1336 	    (IFF_RUNNING|IFF_DEBUG))
1337 		ixgbe_print_hw_stats(sc);
1338 #endif
1339 
1340 	timeout_add_sec(&sc->timer, 1);
1341 
1342 	splx(s);
1343 }
1344 
1345 void
1346 ixgbe_update_link_status(struct ix_softc *sc)
1347 {
1348 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1349 	int		link_state = LINK_STATE_DOWN;
1350 
1351 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1352 
1353 	ifp->if_baudrate = 0;
1354 	if (sc->link_up) {
1355 		link_state = LINK_STATE_FULL_DUPLEX;
1356 
1357 		switch (sc->link_speed) {
1358 		case IXGBE_LINK_SPEED_UNKNOWN:
1359 			ifp->if_baudrate = 0;
1360 			break;
1361 		case IXGBE_LINK_SPEED_100_FULL:
1362 			ifp->if_baudrate = IF_Mbps(100);
1363 			break;
1364 		case IXGBE_LINK_SPEED_1GB_FULL:
1365 			ifp->if_baudrate = IF_Gbps(1);
1366 			break;
1367 		case IXGBE_LINK_SPEED_10GB_FULL:
1368 			ifp->if_baudrate = IF_Gbps(10);
1369 			break;
1370 		}
1371 
1372 		/* Update any Flow Control changes */
1373 		sc->hw.mac.ops.fc_enable(&sc->hw);
1374 	}
1375 	if (ifp->if_link_state != link_state) {
1376 		ifp->if_link_state = link_state;
1377 		if_link_state_change(ifp);
1378 	}
1379 }
1380 
1381 
1382 /*********************************************************************
1383  *
1384  *  This routine disables all traffic on the adapter by issuing a
1385  *  global reset on the MAC and deallocates TX/RX buffers.
1386  *
1387  **********************************************************************/
1388 
1389 void
1390 ixgbe_stop(void *arg)
1391 {
1392 	struct ix_softc *sc = arg;
1393 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1394 
1395 	/* Tell the stack that the interface is no longer active */
1396 	ifp->if_flags &= ~IFF_RUNNING;
1397 
1398 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1399 	ixgbe_disable_intr(sc);
1400 
1401 	sc->hw.mac.ops.reset_hw(&sc->hw);
1402 	sc->hw.adapter_stopped = FALSE;
1403 	sc->hw.mac.ops.stop_adapter(&sc->hw);
1404 	if (sc->hw.mac.type == ixgbe_mac_82599EB)
1405 		sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1406 	/* Turn off the laser */
1407 	if (sc->hw.mac.ops.disable_tx_laser)
1408 		sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1409 	timeout_del(&sc->timer);
1410 	timeout_del(&sc->rx_refill);
1411 
1412 	/* reprogram the RAR[0] in case user changed it. */
1413 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1414 
1415 	ifq_barrier(&ifp->if_snd);
1416 	intr_barrier(sc->tag);
1417 
1418 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1419 
1420 	ifq_clr_oactive(&ifp->if_snd);
1421 
1422 	/* Should we really clear all structures on stop? */
1423 	ixgbe_free_transmit_structures(sc);
1424 	ixgbe_free_receive_structures(sc);
1425 }
1426 
1427 
1428 /*********************************************************************
1429  *
1430  *  Determine hardware revision.
1431  *
1432  **********************************************************************/
1433 void
1434 ixgbe_identify_hardware(struct ix_softc *sc)
1435 {
1436 	struct ixgbe_osdep	*os = &sc->osdep;
1437 	struct pci_attach_args	*pa = &os->os_pa;
1438 	uint32_t		 reg;
1439 
1440 	/* Save off the information about this board */
1441 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1442 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1443 
1444 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1445 	sc->hw.revision_id = PCI_REVISION(reg);
1446 
1447 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1448 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1449 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1450 
1451 	/* We need this here to set the num_segs below */
1452 	ixgbe_set_mac_type(&sc->hw);
1453 
1454 	/* Pick up the smart speed setting on 82599 and later devices */
1455 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
1456 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1457 	sc->num_segs = IXGBE_82599_SCATTER;
1458 }
1459 
1460 /*********************************************************************
1461  *
1462  *  Determine optic type
1463  *
1464  **********************************************************************/
1465 void
1466 ixgbe_setup_optics(struct ix_softc *sc)
1467 {
1468 	struct ixgbe_hw *hw = &sc->hw;
1469 	int		layer;
1470 
1471 	layer = hw->mac.ops.get_supported_physical_layer(hw);
1472 
1473 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1474 		sc->optics = IFM_10G_T;
1475 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1476 		sc->optics = IFM_1000_T;
1477 	else if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1478 		sc->optics = IFM_100_TX;
1479 	else if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1480 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1481 		sc->optics = IFM_10G_SFP_CU;
1482 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR ||
1483 	    layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1484 		sc->optics = IFM_10G_LR;
1485 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
1486 		sc->optics = IFM_10G_SR;
1487 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1488 	    layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1489 		sc->optics = IFM_10G_CX4;
1490 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1491 		sc->optics = IFM_1000_SX;
1492 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
1493 		sc->optics = IFM_1000_LX;
1494 	else
1495 		sc->optics = IFM_AUTO;
1496 }
1497 
1498 /*********************************************************************
1499  *
1500  *  Setup the Legacy or MSI Interrupt handler
1501  *
1502  **********************************************************************/
1503 int
1504 ixgbe_allocate_legacy(struct ix_softc *sc)
1505 {
1506 	struct ixgbe_osdep	*os = &sc->osdep;
1507 	struct pci_attach_args	*pa = &os->os_pa;
1508 	const char		*intrstr = NULL;
1509 	pci_chipset_tag_t	pc = pa->pa_pc;
1510 	pci_intr_handle_t	ih;
1511 
1512 	/* We allocate a single interrupt resource */
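	/* Prefer MSI; fall back to a legacy INTx mapping if unavailable. */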
1513 	if (pci_intr_map_msi(pa, &ih) != 0 &&
1514 	    pci_intr_map(pa, &ih) != 0) {
1515 		printf(": couldn't map interrupt\n");
1516 		return (ENXIO);
1517 	}
1518 
1519 #if 0
1520 	/* XXX */
1521 	/* Tasklets for Link, SFP and Multispeed Fiber */
1522 	TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1523 	TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1524 	TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1525 #endif
1526 
1527 	intrstr = pci_intr_string(pc, ih);
1528 	sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1529 	    ixgbe_intr, sc, sc->dev.dv_xname);
1530 	if (sc->tag == NULL) {
1531 		printf(": couldn't establish interrupt");
1532 		if (intrstr != NULL)
1533 			printf(" at %s", intrstr);
1534 		printf("\n");
1535 		return (ENXIO);
1536 	}
1537 	printf(": %s", intrstr);
1538 
1539 	/* For simplicity in the handlers */
1540 	sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1541 
1542 	return (0);
1543 }
1544 
1545 int
1546 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1547 {
1548 	struct ixgbe_osdep	*os = &sc->osdep;
1549 	struct pci_attach_args	*pa = &os->os_pa;
1550 	int			 val;
1551 
1552 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1553 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1554 		printf(": mmba is not mem space\n");
1555 		return (ENXIO);
1556 	}
1557 
1558 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1559 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1560 		printf(": cannot find mem space\n");
1561 		return (ENXIO);
1562 	}
1563 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1564 
1565 	/* Legacy defaults */
1566 	sc->num_queues = 1;
1567 	sc->hw.back = os;
1568 
1569 #ifdef notyet
1570 	/* Now setup MSI or MSI/X, return us the number of supported vectors. */
1571 	sc->msix = ixgbe_setup_msix(sc);
1572 #endif
1573 
1574 	return (0);
1575 }
1576 
1577 void
1578 ixgbe_free_pci_resources(struct ix_softc * sc)
1579 {
1580 	struct ixgbe_osdep	*os = &sc->osdep;
1581 	struct pci_attach_args	*pa = &os->os_pa;
1582 	struct ix_queue *que = sc->queues;
1583 	int i;
1584 
1585 	/* Release all msix queue resources: */
1586 	for (i = 0; i < sc->num_queues; i++, que++) {
1587 		if (que->tag)
1588 			pci_intr_disestablish(pa->pa_pc, que->tag);
1589 		que->tag = NULL;
1590 	}
1591 
1592 	if (sc->tag)
1593 		pci_intr_disestablish(pa->pa_pc, sc->tag);
1594 	sc->tag = NULL;
1595 	if (os->os_membase != 0)
1596 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1597 	os->os_membase = 0;
1598 }
1599 
1600 /*********************************************************************
1601  *
1602  *  Setup networking device structure and register an interface.
1603  *
1604  **********************************************************************/
1605 void
1606 ixgbe_setup_interface(struct ix_softc *sc)
1607 {
1608 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1609 
1610 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1611 	ifp->if_softc = sc;
1612 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1613 	ifp->if_xflags = IFXF_MPSAFE;
1614 	ifp->if_ioctl = ixgbe_ioctl;
1615 	ifp->if_qstart = ixgbe_start;
1616 	ifp->if_timer = 0;
1617 	ifp->if_watchdog = ixgbe_watchdog;
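	/* The hardware frame limit includes the Ethernet header and CRC. */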
1618 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1619 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1620 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1621 
1622 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1623 
1624 #if NVLAN > 0
1625 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1626 #endif
1627 
1628 #ifdef IX_CSUM_OFFLOAD
1629 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1630 #endif
1631 
1632 	/*
1633 	 * Specify the media types supported by this sc and register
1634 	 * callbacks to update media and link information
1635 	 */
1636 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1637 	    ixgbe_media_status);
1638 	ixgbe_add_media_types(sc);
1639 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1640 
1641 	if_attach(ifp);
1642 	ether_ifattach(ifp);
1643 
1644 	sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1645 }
1646 
1647 void
1648 ixgbe_add_media_types(struct ix_softc *sc)
1649 {
1650 	struct ixgbe_hw	*hw = &sc->hw;
1651 	int		layer;
1652 
1653 	layer = hw->mac.ops.get_supported_physical_layer(hw);
1654 
1655 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1656 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1657 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1658 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1659 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1660 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1661 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1662 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1663 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1664 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1665 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1666 		if (hw->phy.multispeed_fiber)
1667 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1668 			    NULL);
1669 	}
1670 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1671 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1672 		if (hw->phy.multispeed_fiber)
1673 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1674 			    NULL);
1675 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1676 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1677 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1678 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1679 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1680 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1681 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1682 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1683 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1684 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1685 
1686 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1687 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
1688 		    NULL);
1689 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1690 	}
1691 
1692 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1693 }
1694 
1695 void
1696 ixgbe_config_link(struct ix_softc *sc)
1697 {
1698 	uint32_t	autoneg, err = 0;
1699 	bool		negotiate;
1700 
1701 	if (ixgbe_is_sfp(&sc->hw)) {
1702 		if (sc->hw.phy.multispeed_fiber) {
1703 			sc->hw.mac.ops.setup_sfp(&sc->hw);
1704 			if (sc->hw.mac.ops.enable_tx_laser)
1705 				sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1706 			ixgbe_handle_msf(sc);
1707 		} else
1708 			ixgbe_handle_mod(sc);
1709 	} else {
1710 		if (sc->hw.mac.ops.check_link)
1711 			err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1712 			    &sc->link_up, FALSE);
1713 		if (err)
1714 			return;
1715 		autoneg = sc->hw.phy.autoneg_advertised;
1716 		if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1717 			err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1718 			    &autoneg, &negotiate);
1719 		if (err)
1720 			return;
1721 		if (sc->hw.mac.ops.setup_link)
1722 			sc->hw.mac.ops.setup_link(&sc->hw,
1723 			    autoneg, sc->link_up);
1724 	}
1725 }
1726 
1727 /********************************************************************
1728  * Manage DMA'able memory.
1729   *******************************************************************/
1730 int
1731 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1732 		struct ixgbe_dma_alloc *dma, int mapflags)
1733 {
1734 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1735 	struct ixgbe_osdep	*os = &sc->osdep;
1736 	int			 r;
1737 
1738 	dma->dma_tag = os->os_pa.pa_dmat;
1739 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1740 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1741 	if (r != 0) {
1742 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1743 		       "error %u\n", ifp->if_xname, r);
1744 		goto fail_0;
1745 	}
1746 
1747 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1748 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1749 	if (r != 0) {
1750 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1751 		       "error %u\n", ifp->if_xname, r);
1752 		goto fail_1;
1753 	}
1754 
1755 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1756 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1757 	if (r != 0) {
1758 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1759 		       "error %u\n", ifp->if_xname, r);
1760 		goto fail_2;
1761 	}
1762 
1763 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1764 	    size, NULL, mapflags | BUS_DMA_NOWAIT);
1765 	if (r != 0) {
1766 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1767 		       "error %u\n", ifp->if_xname, r);
1768 		goto fail_3;
1769 	}
1770 
1771 	dma->dma_size = size;
1772 	return (0);
1773 fail_3:
1774 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1775 fail_2:
1776 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1777 fail_1:
1778 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1779 fail_0:
1780 	dma->dma_map = NULL;
1781 	dma->dma_tag = NULL;
1782 	return (r);
1783 }
1784 
1785 void
1786 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1787 {
1788 	if (dma->dma_tag == NULL)
1789 		return;
1790 
1791 	if (dma->dma_map != NULL) {
1792 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1793 		    dma->dma_map->dm_mapsize,
1794 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1795 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1796 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1797 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1798 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1799 		dma->dma_map = NULL;
1800 	}
1801 }
1802 
1803 
1804 /*********************************************************************
1805  *
1806  *  Allocate memory for the transmit and receive rings, and then
1807  *  the descriptors associated with each, called only once at attach.
1808  *
1809  **********************************************************************/
1810 int
1811 ixgbe_allocate_queues(struct ix_softc *sc)
1812 {
1813 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1814 	struct ix_queue *que;
1815 	struct tx_ring *txr;
1816 	struct rx_ring *rxr;
1817 	int rsize, tsize;
1818 	int txconf = 0, rxconf = 0, i;
1819 
1820 	/* First allocate the top level queue structs */
1821 	if (!(sc->queues = mallocarray(sc->num_queues,
1822 	    sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1823 		printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
1824 		goto fail;
1825 	}
1826 
1827 	/* Then allocate the TX ring struct memory */
1828 	if (!(sc->tx_rings = mallocarray(sc->num_queues,
1829 	    sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1830 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1831 		goto fail;
1832 	}
1833 
1834 	/* Next allocate the RX */
1835 	if (!(sc->rx_rings = mallocarray(sc->num_queues,
1836 	    sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1837 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1838 		goto rx_fail;
1839 	}
1840 
1841 	/* Space for the TX descriptor ring itself */
1842 	tsize = roundup2(sc->num_tx_desc *
1843 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
1844 
1845 	/*
1846 	 * Now set up the TX queues; txconf is needed to handle the
1847 	 * possibility that things fail midcourse and we need to
1848 	 * unwind the allocated memory gracefully.
1849 	 */
1850 	for (i = 0; i < sc->num_queues; i++, txconf++) {
1851 		/* Set up some basics */
1852 		txr = &sc->tx_rings[i];
1853 		txr->sc = sc;
1854 		txr->me = i;
1855 
1856 		if (ixgbe_dma_malloc(sc, tsize,
1857 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1858 			printf("%s: Unable to allocate TX Descriptor memory\n",
1859 			    ifp->if_xname);
1860 			goto err_tx_desc;
1861 		}
1862 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1863 		bzero((void *)txr->tx_base, tsize);
1864 	}
1865 
1866 	/*
1867 	 * Next the RX queues...
1868 	 */
1869 	rsize = roundup2(sc->num_rx_desc *
1870 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1871 	for (i = 0; i < sc->num_queues; i++, rxconf++) {
1872 		rxr = &sc->rx_rings[i];
1873 		/* Set up some basics */
1874 		rxr->sc = sc;
1875 		rxr->me = i;
1876 
1877 		if (ixgbe_dma_malloc(sc, rsize,
1878 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1879 			printf("%s: Unable to allocate RX Descriptor memory\n",
1880 			    ifp->if_xname);
1881 			goto err_rx_desc;
1882 		}
1883 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1884 		bzero((void *)rxr->rx_base, rsize);
1885 	}
1886 
1887 	/*
1888 	 * Finally set up the queue holding structs
1889 	 */
1890 	for (i = 0; i < sc->num_queues; i++) {
1891 		que = &sc->queues[i];
1892 		que->sc = sc;
1893 		que->txr = &sc->tx_rings[i];
1894 		que->rxr = &sc->rx_rings[i];
1895 	}
1896 
1897 	return (0);
1898 
1899 err_rx_desc:
1900 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1901 		ixgbe_dma_free(sc, &rxr->rxdma);
1902 err_tx_desc:
1903 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
1904 		ixgbe_dma_free(sc, &txr->txdma);
1905 	free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
1906 	sc->rx_rings = NULL;
1907 rx_fail:
1908 	free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
1909 	sc->tx_rings = NULL;
1910 fail:
1911 	return (ENOMEM);
1912 }
1913 
1914 /*********************************************************************
1915  *
1916  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1917  *  the information needed to transmit a packet on the wire. This is
1918  *  called only once at attach; setup is done on every reset.
1919  *
1920  **********************************************************************/
1921 int
1922 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1923 {
1924 	struct ix_softc 	*sc = txr->sc;
1925 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1926 	struct ixgbe_tx_buf	*txbuf;
1927 	int			 error, i;
1928 
1929 	if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
1930 	    sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1931 		printf("%s: Unable to allocate tx_buffer memory\n",
1932 		    ifp->if_xname);
1933 		error = ENOMEM;
1934 		goto fail;
1935 	}
1936 	txr->txtag = txr->txdma.dma_tag;
1937 
1938 	/* Create the descriptor buffer dma maps */
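	/*
	 * Each map can hold a TSO-sized packet split across up to
	 * num_segs segments of at most PAGE_SIZE each.
	 */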
1939 	for (i = 0; i < sc->num_tx_desc; i++) {
1940 		txbuf = &txr->tx_buffers[i];
1941 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1942 			    sc->num_segs, PAGE_SIZE, 0,
1943 			    BUS_DMA_NOWAIT, &txbuf->map);
1944 
1945 		if (error != 0) {
1946 			printf("%s: Unable to create TX DMA map\n",
1947 			    ifp->if_xname);
1948 			goto fail;
1949 		}
1950 	}
1951 
1952 	return 0;
1953 fail:
1954 	return (error);
1955 }
1956 
1957 /*********************************************************************
1958  *
1959  *  Initialize a transmit ring.
1960  *
1961  **********************************************************************/
1962 int
1963 ixgbe_setup_transmit_ring(struct tx_ring *txr)
1964 {
1965 	struct ix_softc		*sc = txr->sc;
1966 	int			 error;
1967 
1968 	/* Now allocate transmit buffers for the ring */
1969 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
1970 		return (error);
1971 
1972 	/* Clear the old ring contents */
1973 	bzero((void *)txr->tx_base,
1974 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1975 
1976 	/* Reset indices */
1977 	txr->next_avail_desc = 0;
1978 	txr->next_to_clean = 0;
1979 
1980 	/* Set number of descriptors available */
1981 	txr->tx_avail = sc->num_tx_desc;
1982 
1983 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1984 	    0, txr->txdma.dma_map->dm_mapsize,
1985 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1986 
1987 	return (0);
1988 }
1989 
1990 /*********************************************************************
1991  *
1992  *  Initialize all transmit rings.
1993  *
1994  **********************************************************************/
1995 int
1996 ixgbe_setup_transmit_structures(struct ix_softc *sc)
1997 {
1998 	struct tx_ring *txr = sc->tx_rings;
1999 	int		i, error;
2000 
2001 	for (i = 0; i < sc->num_queues; i++, txr++) {
2002 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2003 			goto fail;
2004 	}
2005 
2006 	return (0);
2007 fail:
2008 	ixgbe_free_transmit_structures(sc);
2009 	return (error);
2010 }
2011 
2012 /*********************************************************************
2013  *
2014  *  Enable transmit unit.
2015  *
2016  **********************************************************************/
2017 void
2018 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2019 {
2020 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2021 	struct tx_ring	*txr;
2022 	struct ixgbe_hw	*hw = &sc->hw;
2023 	int		 i;
2024 	uint64_t	 tdba;
2025 	uint32_t	 txctrl;
2026 
2027 	/* Setup the Base and Length of the Tx Descriptor Ring */
2028 
2029 	for (i = 0; i < sc->num_queues; i++) {
2030 		txr = &sc->tx_rings[i];
2031 
2032 		/* Setup descriptor base address */
2033 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2034 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2035 		       (tdba & 0x00000000ffffffffULL));
2036 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2037 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2038 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
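		/*
		 * Legacy and advanced TX descriptors are both 16 bytes,
		 * so the TDLEN computed above fits either format.
		 */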
2039 
2040 		/* Setup the HW Tx Head and Tail descriptor pointers */
2041 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2042 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2043 
2044 		/* Setup Transmit Descriptor Cmd Settings */
2045 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2046 		txr->queue_status = IXGBE_QUEUE_IDLE;
2047 		txr->watchdog_timer = 0;
2048 
2049 		/* Disable Head Writeback */
2050 		switch (hw->mac.type) {
2051 		case ixgbe_mac_82598EB:
2052 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2053 			break;
2054 		case ixgbe_mac_82599EB:
2055 		case ixgbe_mac_X540:
2056 		default:
2057 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2058 			break;
2059 		}
2060 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2061 		switch (hw->mac.type) {
2062 		case ixgbe_mac_82598EB:
2063 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2064 			break;
2065 		case ixgbe_mac_82599EB:
2066 		case ixgbe_mac_X540:
2067 		default:
2068 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2069 			break;
2070 		}
2071 	}
2072 	ifp->if_timer = 0;
2073 
2074 	if (hw->mac.type != ixgbe_mac_82598EB) {
2075 		uint32_t dmatxctl, rttdcs;
2076 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2077 		dmatxctl |= IXGBE_DMATXCTL_TE;
2078 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2079 		/* Disable arbiter to set MTQC */
2080 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2081 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2082 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2083 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2084 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2085 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2086 	}
2087 }
2088 
2089 /*********************************************************************
2090  *
2091  *  Free all transmit rings.
2092  *
2093  **********************************************************************/
2094 void
2095 ixgbe_free_transmit_structures(struct ix_softc *sc)
2096 {
2097 	struct tx_ring *txr = sc->tx_rings;
2098 	int		i;
2099 
2100 	for (i = 0; i < sc->num_queues; i++, txr++)
2101 		ixgbe_free_transmit_buffers(txr);
2102 }
2103 
2104 /*********************************************************************
2105  *
2106  *  Free transmit ring related data structures.
2107  *
2108  **********************************************************************/
2109 void
2110 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2111 {
2112 	struct ix_softc *sc = txr->sc;
2113 	struct ixgbe_tx_buf *tx_buffer;
2114 	int             i;
2115 
2116 	INIT_DEBUGOUT("free_transmit_ring: begin");
2117 
2118 	if (txr->tx_buffers == NULL)
2119 		return;
2120 
2121 	tx_buffer = txr->tx_buffers;
2122 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2123 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2124 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2125 			    0, tx_buffer->map->dm_mapsize,
2126 			    BUS_DMASYNC_POSTWRITE);
2127 			bus_dmamap_unload(txr->txdma.dma_tag,
2128 			    tx_buffer->map);
2129 		}
2130 		if (tx_buffer->m_head != NULL) {
2131 			m_freem(tx_buffer->m_head);
2132 			tx_buffer->m_head = NULL;
2133 		}
2134 		if (tx_buffer->map != NULL) {
2135 			bus_dmamap_destroy(txr->txdma.dma_tag,
2136 			    tx_buffer->map);
2137 			tx_buffer->map = NULL;
2138 		}
2139 	}
2140 
2141 	if (txr->tx_buffers != NULL)
2142 		free(txr->tx_buffers, M_DEVBUF,
2143 		    sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2144 	txr->tx_buffers = NULL;
2145 	txr->txtag = NULL;
2146 }
2147 
2148 /*********************************************************************
2149  *
2150  *  Advanced Context Descriptor setup for VLAN or CSUM
2151  *
2152  **********************************************************************/
2153 
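/*
 * Returns 0 when a context descriptor was written or none was needed,
 * and 1 when the mbuf chain is too short to hold the headers.
 */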
2154 int
2155 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2156     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2157 {
2158 	struct ix_softc *sc = txr->sc;
2159 	struct ixgbe_adv_tx_context_desc *TXD;
2160 	struct ixgbe_tx_buf *tx_buffer;
2161 #if NVLAN > 0
2162 	struct ether_vlan_header *eh;
2163 #else
2164 	struct ether_header *eh;
2165 #endif
2166 	struct ip *ip;
2167 #ifdef notyet
2168 	struct ip6_hdr *ip6;
2169 #endif
2170 	struct mbuf *m;
2171 	int	ipoff;
2172 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2173 	int 	ehdrlen, ip_hlen = 0;
2174 	uint16_t etype;
2175 	uint8_t	ipproto = 0;
2176 	int	offload = TRUE;
2177 	int	ctxd = txr->next_avail_desc;
2178 #if NVLAN > 0
2179 	uint16_t vtag = 0;
2180 #endif
2181 
2182 #if notyet
2183 	/* First check if TSO is to be used */
2184 	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
2185 		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2186 #endif
2187 
2188 	if ((mp->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) == 0)
2189 		offload = FALSE;
2190 
2191 	/* Indicate the whole packet as payload when not doing TSO */
2192 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2193 
2194 	/* Now ready a context descriptor */
2195 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2196 	tx_buffer = &txr->tx_buffers[ctxd];
2197 
2198 	/*
2199 	 * In advanced descriptors the vlan tag must
2200 	 * be placed into the descriptor itself. Hence
2201 	 * we need to make one even if not doing offloads.
2202 	 */
2203 #if NVLAN > 0
2204 	if (mp->m_flags & M_VLANTAG) {
2205 		vtag = mp->m_pkthdr.ether_vtag;
2206 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2207 	} else
2208 #endif
2209 	if (offload == FALSE)
2210 		return (0);	/* No need for CTX */
2211 
2212 	/*
2213 	 * Determine where frame payload starts.
2214 	 * Jump over vlan headers if already present,
2215 	 * helpful for QinQ too.
2216 	 */
2217 	if (mp->m_len < sizeof(struct ether_header))
2218 		return (1);
2219 #if NVLAN > 0
2220 	eh = mtod(mp, struct ether_vlan_header *);
2221 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2222 		if (mp->m_len < sizeof(struct ether_vlan_header))
2223 			return (1);
2224 		etype = ntohs(eh->evl_proto);
2225 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2226 	} else {
2227 		etype = ntohs(eh->evl_encap_proto);
2228 		ehdrlen = ETHER_HDR_LEN;
2229 	}
2230 #else
2231 	eh = mtod(mp, struct ether_header *);
2232 	etype = ntohs(eh->ether_type);
2233 	ehdrlen = ETHER_HDR_LEN;
2234 #endif
2235 
2236 	/* Set the ether header length */
2237 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2238 
2239 	switch (etype) {
2240 	case ETHERTYPE_IP:
2241 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip))
2242 			return (1);
2243 		m = m_getptr(mp, ehdrlen, &ipoff);
2244 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip));
2245 		ip = (struct ip *)(m->m_data + ipoff);
2246 		ip_hlen = ip->ip_hl << 2;
2247 		ipproto = ip->ip_p;
2248 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2249 		break;
2250 #ifdef notyet
2251 	case ETHERTYPE_IPV6:
2252 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip6))
2253 			return (1);
2254 		m = m_getptr(mp, ehdrlen, &ipoff);
2255 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6));
2256 		ip6 = (struct ip6_hdr *)(m->m_data + ipoff);
2257 		ip_hlen = sizeof(*ip6);
2258 		/* XXX-BZ this will go badly in case of ext hdrs. */
2259 		ipproto = ip6->ip6_nxt;
2260 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2261 		break;
2262 #endif
2263 	default:
2264 		offload = FALSE;
2265 		break;
2266 	}
2267 
2268 	vlan_macip_lens |= ip_hlen;
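	/*
	 * vlan_macip_lens now carries IPLEN in bits 8:0, MACLEN in bits
	 * 15:9 and the VLAN tag in bits 31:16, matching the advanced
	 * context descriptor layout.
	 */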
2269 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2270 
2271 	switch (ipproto) {
2272 	case IPPROTO_TCP:
2273 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2274 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2275 		break;
2276 	case IPPROTO_UDP:
2277 		if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2278 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2279 		break;
2280 	default:
2281 		offload = FALSE;
2282 		break;
2283 	}
2284 
2285 	if (offload) /* For the TX descriptor setup */
2286 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2287 
2288 	/* Now copy bits into descriptor */
2289 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2290 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2291 	TXD->seqnum_seed = htole32(0);
2292 	TXD->mss_l4len_idx = htole32(0);
2293 
2294 	tx_buffer->m_head = NULL;
2295 	tx_buffer->eop_index = -1;
2296 
2297 	membar_producer();
2298 
2299 	/* We've consumed the first desc, adjust counters */
2300 	if (++ctxd == sc->num_tx_desc)
2301 		ctxd = 0;
2302 	txr->next_avail_desc = ctxd;
2303 	atomic_dec_int(&txr->tx_avail);
2304 
2305 	return (0);
2306 }
2307 
2308 /**********************************************************************
2309  *
2310  *  Examine each tx_buffer in the used queue. If the hardware is done
2311  *  processing the packet, then free the associated resources. The
2312  *  tx_buffer is put back on the free queue.
2313  *
2314  **********************************************************************/
2315 int
2316 ixgbe_txeof(struct tx_ring *txr)
2317 {
2318 	struct ix_softc			*sc = txr->sc;
2319 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2320 	uint32_t			 first, last, done, processed;
2321 	uint32_t			 num_avail;
2322 	struct ixgbe_tx_buf		*tx_buffer;
2323 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2324 
2325 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2326 		return FALSE;
2327 
2328 	if (txr->tx_avail == sc->num_tx_desc) {
2329 		txr->queue_status = IXGBE_QUEUE_IDLE;
2330 		return FALSE;
2331 	}
2332 
2333 	membar_consumer();
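	/* Pairs with the membar_producer() in the transmit setup path. */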
2334 
2335 	processed = 0;
2336 	first = txr->next_to_clean;
2337 	/* was the tx queue cleaned up in the meantime? */
2338 	if (txr->tx_buffers == NULL)
2339 		return FALSE;
2340 	tx_buffer = &txr->tx_buffers[first];
2341 	/* For cleanup we just use legacy struct */
2342 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2343 	last = tx_buffer->eop_index;
2344 	if (last == -1)
2345 		return FALSE;
2346 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2347 
2348 	/*
2349 	 * Get the index of the first descriptor
2350 	 * BEYOND the EOP and call that 'done'.
2351 	 * I do this so the comparison in the
2352 	 * inner while loop below can be simple.
2353 	 */
2354 	if (++last == sc->num_tx_desc) last = 0;
2355 	done = last;
2356 
2357 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2358 	    0, txr->txdma.dma_map->dm_mapsize,
2359 	    BUS_DMASYNC_POSTREAD);
2360 
2361 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2362 		/* We clean the range of the packet */
2363 		while (first != done) {
2364 			tx_desc->upper.data = 0;
2365 			tx_desc->lower.data = 0;
2366 			tx_desc->buffer_addr = 0;
2367 			++processed;
2368 
2369 			if (tx_buffer->m_head) {
2370 				bus_dmamap_sync(txr->txdma.dma_tag,
2371 				    tx_buffer->map,
2372 				    0, tx_buffer->map->dm_mapsize,
2373 				    BUS_DMASYNC_POSTWRITE);
2374 				bus_dmamap_unload(txr->txdma.dma_tag,
2375 				    tx_buffer->map);
2376 				m_freem(tx_buffer->m_head);
2377 				tx_buffer->m_head = NULL;
2378 			}
2379 			tx_buffer->eop_index = -1;
2380 
2381 			if (++first == sc->num_tx_desc)
2382 				first = 0;
2383 
2384 			tx_buffer = &txr->tx_buffers[first];
2385 			tx_desc = (struct ixgbe_legacy_tx_desc *)
2386 			    &txr->tx_base[first];
2387 		}
2388 		++txr->packets;
2389 		/* See if there is more work now */
2390 		last = tx_buffer->eop_index;
2391 		if (last != -1) {
2392 			eop_desc =
2393 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2394 			/* Get next done point */
2395 			if (++last == sc->num_tx_desc) last = 0;
2396 			done = last;
2397 		} else
2398 			break;
2399 	}
2400 
2401 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2402 	    0, txr->txdma.dma_map->dm_mapsize,
2403 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2404 
2405 	txr->next_to_clean = first;
2406 
2407 	num_avail = atomic_add_int_nv(&txr->tx_avail, processed);
2408 
2409 	/* All clean, turn off the timer */
2410 	if (num_avail == sc->num_tx_desc)
2411 		ifp->if_timer = 0;
2412 
2413 	if (ifq_is_oactive(&ifp->if_snd))
2414 		ifq_restart(&ifp->if_snd);
2415 
2416 	return TRUE;
2417 }
2418 
2419 /*********************************************************************
2420  *
2421  *  Get a buffer from system mbuf buffer pool.
2422  *
2423  **********************************************************************/
2424 int
2425 ixgbe_get_buf(struct rx_ring *rxr, int i)
2426 {
2427 	struct ix_softc		*sc = rxr->sc;
2428 	struct ixgbe_rx_buf	*rxbuf;
2429 	struct mbuf		*mp;
2430 	int			error;
2431 	union ixgbe_adv_rx_desc	*rxdesc;
2432 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2433 
2434 	rxbuf = &rxr->rx_buffers[i];
2435 	rxdesc = &rxr->rx_base[i];
2436 	if (rxbuf->buf) {
2437 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2438 		    sc->dev.dv_xname, i);
2439 		return (ENOBUFS);
2440 	}
2441 
2442 	/* needed in any case, so preallocate it first since this is the allocation most likely to fail */
2443 	mp = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rx_mbuf_sz);
2444 	if (!mp)
2445 		return (ENOBUFS);
2446 
2447 	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2448 	m_adj(mp, ETHER_ALIGN);
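	/*
	 * The ETHER_ALIGN adjustment keeps the IP header 4-byte aligned
	 * behind the 14-byte Ethernet header.
	 */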
2449 
2450 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2451 	    mp, BUS_DMA_NOWAIT);
2452 	if (error) {
2453 		m_freem(mp);
2454 		return (error);
2455 	}
2456 
2457 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2458 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2459 	rxbuf->buf = mp;
2460 
2461 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2462 	    dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
2463 
2464 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2465 
2466 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2467 	    dsize * i, dsize, BUS_DMASYNC_PREWRITE);
2468 
2469 	return (0);
2470 }
2471 
2472 /*********************************************************************
2473  *
2474  *  Allocate memory for rx_buffer structures. Since we use one
2475  *  rx_buffer per received packet, the maximum number of rx_buffers
2476  *  that we'll need is equal to the number of receive descriptors
2477  *  that we've allocated.
2478  *
2479  **********************************************************************/
2480 int
2481 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2482 {
2483 	struct ix_softc		*sc = rxr->sc;
2484 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2485 	struct ixgbe_rx_buf 	*rxbuf;
2486 	int			i, error;
2487 
2488 	if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2489 	    sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2490 		printf("%s: Unable to allocate rx_buffer memory\n",
2491 		    ifp->if_xname);
2492 		error = ENOMEM;
2493 		goto fail;
2494 	}
2495 
2496 	rxbuf = rxr->rx_buffers;
2497 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2498 		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2499 		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2500 		if (error) {
2501 			printf("%s: Unable to create packet DMA map\n",
2502 			    ifp->if_xname);
2503 			goto fail;
2504 		}
2505 	}
2506 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2507 	    rxr->rxdma.dma_map->dm_mapsize,
2508 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2509 
2510 	return (0);
2511 
2512 fail:
2513 	return (error);
2514 }
2515 
2516 /*********************************************************************
2517  *
2518  *  Initialize a receive ring and its buffers.
2519  *
2520  **********************************************************************/
2521 int
2522 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2523 {
2524 	struct ix_softc		*sc = rxr->sc;
2525 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2526 	int			 rsize, error;
2527 
2528 	rsize = roundup2(sc->num_rx_desc *
2529 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2530 	/* Clear the ring contents */
2531 	bzero((void *)rxr->rx_base, rsize);
2532 
2533 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2534 		return (error);
2535 
2536 	/* Setup our descriptor indices */
2537 	rxr->next_to_check = 0;
2538 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2539 
2540 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2541 	    sc->num_rx_desc);
2542 
2543 	ixgbe_rxfill(rxr);
2544 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2545 		printf("%s: unable to fill any rx descriptors\n",
2546 		    sc->dev.dv_xname);
2547 		return (ENOBUFS);
2548 	}
2549 
2550 	return (0);
2551 }
2552 
2553 int
2554 ixgbe_rxfill(struct rx_ring *rxr)
2555 {
2556 	struct ix_softc *sc = rxr->sc;
2557 	int		 post = 0;
2558 	u_int		 slots;
2559 	int		 i;
2560 
2561 	i = rxr->last_desc_filled;
2562 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2563 	    slots > 0; slots--) {
2564 		if (++i == sc->num_rx_desc)
2565 			i = 0;
2566 
2567 		if (ixgbe_get_buf(rxr, i) != 0)
2568 			break;
2569 
2570 		rxr->last_desc_filled = i;
2571 		post = 1;
2572 	}
2573 
2574 	if_rxr_put(&rxr->rx_ring, slots);
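	/* Return any slots we could not fill to the ring accounting. */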
2575 
2576 	return (post);
2577 }
2578 
2579 void
2580 ixgbe_rxrefill(void *xsc)
2581 {
2582 	struct ix_softc *sc = xsc;
2583 	struct ix_queue *que = sc->queues;
2584 	int s;
2585 
2586 	s = splnet();
2587 	if (ixgbe_rxfill(que->rxr)) {
2588 		/* Advance the Rx Queue "Tail Pointer" */
2589 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
2590 		    que->rxr->last_desc_filled);
2591 	} else
2592 		timeout_add(&sc->rx_refill, 1);
2593 	splx(s);
2594 }
2595 
2596 /*********************************************************************
2597  *
2598  *  Initialize all receive rings.
2599  *
2600  **********************************************************************/
2601 int
2602 ixgbe_setup_receive_structures(struct ix_softc *sc)
2603 {
2604 	struct rx_ring *rxr = sc->rx_rings;
2605 	int i;
2606 
2607 	for (i = 0; i < sc->num_queues; i++, rxr++)
2608 		if (ixgbe_setup_receive_ring(rxr))
2609 			goto fail;
2610 
2611 	return (0);
2612 fail:
2613 	ixgbe_free_receive_structures(sc);
2614 	return (ENOBUFS);
2615 }
2616 
2617 /*********************************************************************
2618  *
2619  *  Setup receive registers and features.
2620  *
2621  **********************************************************************/
2622 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2623 
2624 void
2625 ixgbe_initialize_receive_units(struct ix_softc *sc)
2626 {
2627 	struct rx_ring	*rxr = sc->rx_rings;
2628 	struct ixgbe_hw	*hw = &sc->hw;
2629 	uint32_t	bufsz, fctrl, srrctl, rxcsum;
2630 	uint32_t	hlreg;
2631 	int		i;
2632 
2633 	/*
2634 	 * Make sure receives are disabled while
2635 	 * setting up the descriptor ring
2636 	 */
2637 	ixgbe_disable_rx(hw);
2638 
2639 	/* Enable broadcasts */
2640 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2641 	fctrl |= IXGBE_FCTRL_BAM;
2642 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2643 		fctrl |= IXGBE_FCTRL_DPF;
2644 		fctrl |= IXGBE_FCTRL_PMCF;
2645 	}
2646 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2647 
2648 	/* Always enable jumbo frame reception */
2649 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2650 	hlreg |= IXGBE_HLREG0_JUMBOEN;
2651 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2652 
2653 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
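	/*
	 * The SRRCTL buffer size must match what ixgbe_get_buf() leaves
	 * available after its ETHER_ALIGN adjustment.
	 */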
2654 
2655 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2656 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2657 
2658 		/* Setup the Base and Length of the Rx Descriptor Ring */
2659 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2660 			       (rdba & 0x00000000ffffffffULL));
2661 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2662 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2663 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2664 
2665 		/* Set up the SRRCTL register */
2666 		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2667 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2668 
2669 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2670 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2671 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2672 	}
2673 
2674 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2675 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2676 			      IXGBE_PSRTYPE_UDPHDR |
2677 			      IXGBE_PSRTYPE_IPV4HDR |
2678 			      IXGBE_PSRTYPE_IPV6HDR;
2679 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2680 	}
2681 
2682 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2683 	rxcsum &= ~IXGBE_RXCSUM_PCSD;
2684 
2685 	/* Setup RSS */
2686 	if (sc->num_queues > 1) {
2687 		ixgbe_initialize_rss_mapping(sc);
2688 
2689 		/* RSS and RX IPP Checksum are mutually exclusive */
2690 		rxcsum |= IXGBE_RXCSUM_PCSD;
2691 	}
2692 
2693 	/* This is useful for calculating UDP/IP fragment checksums */
2694 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2695 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2696 
2697 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2698 }
2699 
2700 void
2701 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2702 {
2703 	struct ixgbe_hw	*hw = &sc->hw;
2704 	uint32_t reta = 0, mrqc, rss_key[10];
2705 	int i, j, queue_id, table_size, index_mult;
2706 
2707 	/* set up random bits */
2708 	arc4random_buf(&rss_key, sizeof(rss_key));
2709 
2710 	/* Set multiplier for RETA setup and table size based on MAC */
2711 	index_mult = 0x1;
2712 	table_size = 128;
2713 	switch (sc->hw.mac.type) {
2714 	case ixgbe_mac_82598EB:
2715 		index_mult = 0x11;
2716 		break;
2717 	case ixgbe_mac_X550:
2718 	case ixgbe_mac_X550EM_x:
2719 		table_size = 512;
2720 		break;
2721 	default:
2722 		break;
2723 	}
2724 
2725 	/* Set up the redirection table */
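	/*
	 * Each 32-bit RETA register packs four 8-bit queue indices;
	 * entries beyond the first 128 land in the ERETA range.
	 */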
2726 	for (i = 0, j = 0; i < table_size; i++, j++) {
2727 		if (j == sc->num_queues) j = 0;
2728 		queue_id = (j * index_mult);
2729 		/*
2730 		 * The low 8 bits are for hash value (n+0);
2731 		 * The next 8 bits are for hash value (n+1), etc.
2732 		 */
2733 		reta = reta >> 8;
2734 		reta = reta | ( ((uint32_t) queue_id) << 24);
2735 		if ((i & 3) == 3) {
2736 			if (i < 128)
2737 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2738 			else
2739 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2740 				    reta);
2741 			reta = 0;
2742 		}
2743 	}
2744 
2745 	/* Now fill our hash function seeds */
2746 	for (i = 0; i < 10; i++)
2747 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2748 
2749 	/*
2750 	 * Disable UDP - IP fragments aren't currently being handled
2751 	 * and so we end up with a mix of 2-tuple and 4-tuple
2752 	 * traffic.
2753 	 */
2754 	mrqc = IXGBE_MRQC_RSSEN
2755 	     | IXGBE_MRQC_RSS_FIELD_IPV4
2756 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2757 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2758 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2759 	     | IXGBE_MRQC_RSS_FIELD_IPV6
2760 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2761 	;
2762 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2763 }
2764 
2765 /*********************************************************************
2766  *
2767  *  Free all receive rings.
2768  *
2769  **********************************************************************/
2770 void
2771 ixgbe_free_receive_structures(struct ix_softc *sc)
2772 {
2773 	struct rx_ring *rxr;
2774 	int		i;
2775 
2776 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2777 		if_rxr_init(&rxr->rx_ring, 0, 0);
2778 
2779 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2780 		ixgbe_free_receive_buffers(rxr);
2781 }
2782 
2783 /*********************************************************************
2784  *
2785  *  Free receive ring data structures
2786  *
2787  **********************************************************************/
2788 void
2789 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2790 {
2791 	struct ix_softc		*sc;
2792 	struct ixgbe_rx_buf	*rxbuf;
2793 	int			 i;
2794 
2795 	sc = rxr->sc;
2796 	if (rxr->rx_buffers != NULL) {
2797 		for (i = 0; i < sc->num_rx_desc; i++) {
2798 			rxbuf = &rxr->rx_buffers[i];
2799 			if (rxbuf->buf != NULL) {
2800 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2801 				    0, rxbuf->map->dm_mapsize,
2802 				    BUS_DMASYNC_POSTREAD);
2803 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2804 				    rxbuf->map);
2805 				m_freem(rxbuf->buf);
2806 				rxbuf->buf = NULL;
2807 			}
2808 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2809 			rxbuf->map = NULL;
2810 		}
2811 		free(rxr->rx_buffers, M_DEVBUF,
2812 		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
2813 		rxr->rx_buffers = NULL;
2814 	}
2815 }
2816 
2817 /*********************************************************************
2818  *
2819  *  This routine executes in interrupt context. It replenishes
2820  *  the mbufs in the descriptor ring and passes data which has been
2821  *  DMA'ed into host memory up to the upper layer.
2822  *
2823  *********************************************************************/
2824 int
2825 ixgbe_rxeof(struct ix_queue *que)
2826 {
2827 	struct ix_softc 	*sc = que->sc;
2828 	struct rx_ring		*rxr = que->rxr;
2829 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2830 	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
2831 	struct mbuf    		*mp, *sendmp;
2832 	uint8_t		    	 eop = 0;
2833 	uint16_t		 len, vtag;
2834 	uint32_t		 staterr = 0, ptype;
2835 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
2836 	union ixgbe_adv_rx_desc	*rxdesc;
2837 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2838 	int			 i, nextp;
2839 
2840 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2841 		return FALSE;
2842 
2843 	i = rxr->next_to_check;
2844 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
2845 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2846 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
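		/* Sync just this descriptor before peeking at its status. */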
2847 
2848 		rxdesc = &rxr->rx_base[i];
2849 		staterr = letoh32(rxdesc->wb.upper.status_error);
2850 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2851 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2852 			    dsize * i, dsize,
2853 			    BUS_DMASYNC_PREREAD);
2854 			break;
2855 		}
2856 
2857 		/* Zero out the receive descriptor's status */
2858 		rxdesc->wb.upper.status_error = 0;
2859 		rxbuf = &rxr->rx_buffers[i];
2860 
2861 		/* pull the mbuf off the ring */
2862 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2863 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2864 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2865 
2866 		mp = rxbuf->buf;
2867 		len = letoh16(rxdesc->wb.upper.length);
2868 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
2869 		    IXGBE_RXDADV_PKTTYPE_MASK;
2870 		vtag = letoh16(rxdesc->wb.upper.vlan);
2871 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
2872 
2873 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
2874 			sc->dropped_pkts++;
2875 
2876 			if (rxbuf->fmp) {
2877 				m_freem(rxbuf->fmp);
2878 				rxbuf->fmp = NULL;
2879 			}
2880 
2881 			m_freem(mp);
2882 			rxbuf->buf = NULL;
2883 			goto next_desc;
2884 		}
2885 
2886 		if (mp == NULL) {
2887 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2888 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
2889 			    i, if_rxr_inuse(&rxr->rx_ring),
2890 			    rxr->last_desc_filled);
2891 		}
2892 
2893 		/* Currently no HW RSC support on the 82599 */
2894 		if (!eop) {
2895 			/*
2896 			 * Figure out the next descriptor of this frame.
2897 			 */
2898 			nextp = i + 1;
2899 			if (nextp == sc->num_rx_desc)
2900 				nextp = 0;
2901 			nxbuf = &rxr->rx_buffers[nextp];
2902 			/* prefetch(nxbuf); */
2903 		}
2904 
2905 		/*
2906 		 * Rather than using the fmp/lmp global pointers
2907 		 * we now keep the head of a packet chain in the
2908 		 * buffer struct and pass this along from one
2909 		 * descriptor to the next, until we get EOP.
2910 		 */
2911 		mp->m_len = len;
2912 		/*
2913 		 * See if there is a stored head
2914 		 * that determines what we are
2915 		 */
2916 		sendmp = rxbuf->fmp;
2917 		rxbuf->buf = rxbuf->fmp = NULL;
2918 
2919 		if (sendmp != NULL) /* secondary frag */
2920 			sendmp->m_pkthdr.len += mp->m_len;
2921 		else {
2922 			/* first desc of a non-ps chain */
2923 			sendmp = mp;
2924 			sendmp->m_pkthdr.len = mp->m_len;
2925 #if NVLAN > 0
2926 			if (staterr & IXGBE_RXD_STAT_VP) {
2927 				sendmp->m_pkthdr.ether_vtag = vtag;
2928 				sendmp->m_flags |= M_VLANTAG;
2929 			}
2930 #endif
2931 		}
2932 
2933 		/* Pass the head pointer on */
2934 		if (eop == 0) {
2935 			nxbuf->fmp = sendmp;
2936 			sendmp = NULL;
2937 			mp->m_next = nxbuf->buf;
2938 		} else { /* Sending this frame? */
2939 			rxr->rx_packets++;
2940 			/* capture data for AIM */
2941 			rxr->bytes += sendmp->m_pkthdr.len;
2942 			rxr->rx_bytes += sendmp->m_pkthdr.len;
2943 
2944 			ixgbe_rx_checksum(staterr, sendmp, ptype);
2945 
2946 			ml_enqueue(&ml, sendmp);
2947 		}
2948 next_desc:
2949 		if_rxr_put(&rxr->rx_ring, 1);
2950 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2951 		    dsize * i, dsize,
2952 		    BUS_DMASYNC_PREREAD);
2953 
2954 		/* Advance our pointers to the next descriptor. */
2955 		if (++i == sc->num_rx_desc)
2956 			i = 0;
2957 	}
2958 	rxr->next_to_check = i;
2959 
2960 	if_input(ifp, &ml);
2961 
2962 	if (!(staterr & IXGBE_RXD_STAT_DD))
2963 		return FALSE;
2964 
2965 	return TRUE;
2966 }
2967 
2968 /*********************************************************************
2969  *
2970  *  Verify that the hardware indicated that the checksum is valid.
2971  *  Inform the stack about the status of the checksum so that the
2972  *  stack doesn't spend time verifying it.
2973  *
2974  *********************************************************************/
2975 void
2976 ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
2977 {
2978 	uint16_t status = (uint16_t) staterr;
2979 	uint8_t  errors = (uint8_t) (staterr >> 24);
2980 
2981 	if (status & IXGBE_RXD_STAT_IPCS) {
2982 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
2983 			/* IP Checksum Good */
2984 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2985 		} else
2986 			mp->m_pkthdr.csum_flags = 0;
2987 	}
2988 	if (status & IXGBE_RXD_STAT_L4CS) {
2989 		if (!(errors & IXGBE_RXD_ERR_TCPE))
2990 			mp->m_pkthdr.csum_flags |=
2991 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2992 	}
2993 }
2994 
2995 void
2996 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
2997 {
2998 	uint32_t	ctrl;
2999 	int		i;
3000 
3001 	/*
3002 	 * A soft reset zeroes out the VFTA, so
3003 	 * we need to repopulate it now.
3004 	 */
3005 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3006 		if (sc->shadow_vfta[i] != 0)
3007 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3008 			    sc->shadow_vfta[i]);
3009 	}
3010 
3011 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3012 #if 0
3013 	/* Enable the Filter Table if enabled */
3014 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3015 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3016 		ctrl |= IXGBE_VLNCTRL_VFE;
3017 	}
3018 #endif
3019 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3020 		ctrl |= IXGBE_VLNCTRL_VME;
3021 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3022 
3023 	/* On 82599 the VLAN enable is per-queue in RXDCTL */
3024 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3025 		for (i = 0; i < sc->num_queues; i++) {
3026 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3027 			ctrl |= IXGBE_RXDCTL_VME;
3028 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3029 		}
3030 	}
3031 }
3032 
3033 void
3034 ixgbe_enable_intr(struct ix_softc *sc)
3035 {
3036 	struct ixgbe_hw *hw = &sc->hw;
3037 	struct ix_queue *que = sc->queues;
3038 	uint32_t	mask, fwsm;
3039 	int i;
3040 
3041 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3042 	/* Enable Fan Failure detection */
3043 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3044 		mask |= IXGBE_EIMS_GPI_SDP1;
3045 
3046 	switch (sc->hw.mac.type) {
3047 	case ixgbe_mac_82599EB:
3048 		mask |= IXGBE_EIMS_ECC;
3049 		/* Temperature sensor on some adapters */
3050 		mask |= IXGBE_EIMS_GPI_SDP0;
3051 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3052 		mask |= IXGBE_EIMS_GPI_SDP1;
3053 		mask |= IXGBE_EIMS_GPI_SDP2;
3054 		break;
3055 	case ixgbe_mac_X540:
3056 		mask |= IXGBE_EIMS_ECC;
3057 		/* Detect if Thermal Sensor is enabled */
3058 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3059 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3060 			mask |= IXGBE_EIMS_TS;
3061 		break;
3062 	case ixgbe_mac_X550:
3063 	case ixgbe_mac_X550EM_x:
3064 		mask |= IXGBE_EIMS_ECC;
3065 		/* MAC thermal sensor is automatically enabled */
3066 		mask |= IXGBE_EIMS_TS;
3067 		/* Some devices use SDP0 for important information */
3068 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3069 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3070 			mask |= IXGBE_EIMS_GPI_SDP0_X540;
3071 	default:
3072 		break;
3073 	}
3074 
3075 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3076 
3077 	/* With MSI-X we use auto clear */
3078 	if (sc->msix > 1) {
3079 		mask = IXGBE_EIMS_ENABLE_MASK;
3080 		/* Don't autoclear Link */
3081 		mask &= ~IXGBE_EIMS_OTHER;
3082 		mask &= ~IXGBE_EIMS_LSC;
3083 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3084 	}
3085 
3086 	/*
3087 	 * Now enable all queues, this is done separately to
3088 	 * allow for handling the extended (beyond 32) MSIX
3089 	 * vectors that can be used by 82599
3090 	 */
3091 	for (i = 0; i < sc->num_queues; i++, que++)
3092 		ixgbe_enable_queue(sc, que->msix);
3093 
3094 	IXGBE_WRITE_FLUSH(hw);
3095 }
3096 
3097 void
3098 ixgbe_disable_intr(struct ix_softc *sc)
3099 {
3100 	if (sc->msix > 1)
3101 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3102 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3103 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3104 	} else {
3105 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3106 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3107 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3108 	}
3109 	IXGBE_WRITE_FLUSH(&sc->hw);
3110 }
3111 
3112 uint16_t
3113 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3114 {
3115 	struct pci_attach_args	*pa;
3116 	uint32_t value;
3117 	int high = 0;
3118 
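	/*
	 * pci_conf_read(9) only does aligned 32-bit accesses, so read
	 * the containing dword and return the requested 16-bit half.
	 */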
3119 	if (reg & 0x2) {
3120 		high = 1;
3121 		reg &= ~0x2;
3122 	}
3123 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3124 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3125 
3126 	if (high)
3127 		value >>= 16;
3128 
3129 	return (value & 0xffff);
3130 }
3131 
3132 void
3133 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3134 {
3135 	struct pci_attach_args	*pa;
3136 	uint32_t rv;
3137 	int high = 0;
3138 
3139 	/* Need to do read/mask/write... because 16 vs 32 bit!!! */
3140 	if (reg & 0x2) {
3141 		high = 1;
3142 		reg &= ~0x2;
3143 	}
3144 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3145 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3146 	if (!high)
3147 		rv = (rv & 0xffff0000) | value;
3148 	else
3149 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3150 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3151 }
3152 
3153 /*
3154  * Setup the correct IVAR register for a particular MSIX interrupt
3155  *   (yes this is all very magic and confusing :)
3156  *  - entry is the register array entry
3157  *  - vector is the MSIX vector for this queue
3158  *  - type is RX/TX/MISC
3159  */
3160 void
3161 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3162 {
3163 	struct ixgbe_hw *hw = &sc->hw;
3164 	uint32_t ivar, index;
3165 
3166 	vector |= IXGBE_IVAR_ALLOC_VAL;
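	/*
	 * Each 32-bit IVAR register holds four 8-bit vector entries;
	 * the index/shift math below selects the byte belonging to
	 * this entry and cause type.
	 */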
3167 
3168 	switch (hw->mac.type) {
3169 
3170 	case ixgbe_mac_82598EB:
3171 		if (type == -1)
3172 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3173 		else
3174 			entry += (type * 64);
3175 		index = (entry >> 2) & 0x1F;
3176 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3177 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3178 		ivar |= (vector << (8 * (entry & 0x3)));
3179 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3180 		break;
3181 
3182 	case ixgbe_mac_82599EB:
3183 	case ixgbe_mac_X540:
3184 	case ixgbe_mac_X550:
3185 	case ixgbe_mac_X550EM_x:
3186 		if (type == -1) { /* MISC IVAR */
3187 			index = (entry & 1) * 8;
3188 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3189 			ivar &= ~(0xFF << index);
3190 			ivar |= (vector << index);
3191 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3192 		} else {	/* RX/TX IVARS */
3193 			index = (16 * (entry & 1)) + (8 * type);
3194 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3195 			ivar &= ~(0xFF << index);
3196 			ivar |= (vector << index);
3197 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3198 		}
3199 
3200 	default:
3201 		break;
3202 	}
3203 }
3204 
3205 void
3206 ixgbe_configure_ivars(struct ix_softc *sc)
3207 {
3208 #if notyet
3209 	struct ix_queue *que = sc->queues;
3210 	uint32_t newitr;
3211 	int i;
3212 
3213 	if (ixgbe_max_interrupt_rate > 0)
3214 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3215 	else
3216 		newitr = 0;
3217 
3218 	for (i = 0; i < sc->num_queues; i++, que++) {
3219 		/* First the RX queue entry */
3220 		ixgbe_set_ivar(sc, i, que->msix, 0);
3221 		/* ... and the TX */
3222 		ixgbe_set_ivar(sc, i, que->msix, 1);
3223 		/* Set an Initial EITR value */
3224 		IXGBE_WRITE_REG(&sc->hw,
3225 		    IXGBE_EITR(que->msix), newitr);
3226 	}
3227 
3228 	/* For the Link interrupt */
3229 	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3230 #endif
3231 }
3232 
3233 /*
3234  * SFP module interrupts handler
3235  */
3236 void
3237 ixgbe_handle_mod(struct ix_softc *sc)
3238 {
3239 	struct ixgbe_hw *hw = &sc->hw;
3240 	uint32_t err;
3241 
3242 	err = hw->phy.ops.identify_sfp(hw);
3243 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3244 		printf("%s: Unsupported SFP+ module type was detected!\n",
3245 		    sc->dev.dv_xname);
3246 		return;
3247 	}
3248 	err = hw->mac.ops.setup_sfp(hw);
3249 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3250 		printf("%s: Setup failure - unsupported SFP+ module type!\n",
3251 		    sc->dev.dv_xname);
3252 		return;
3253 	}
3254 	/* Set the optics type so system reports correctly */
3255 	ixgbe_setup_optics(sc);
3256 
3257 	ixgbe_handle_msf(sc);
3258 }
3259 
3260 
3261 /*
3262  * MSF (multispeed fiber) interrupts handler
3263  */
3264 void
3265 ixgbe_handle_msf(struct ix_softc *sc)
3266 {
3267 	struct ixgbe_hw *hw = &sc->hw;
3268 	uint32_t autoneg;
3269 	bool negotiate;
3270 
3271 	autoneg = hw->phy.autoneg_advertised;
3272 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3273 		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3274 			return;
3275 	}
3276 	if (hw->mac.ops.setup_link)
3277 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3278 
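	/*
	 * The supported media may have changed with the new module, so
	 * rebuild the ifmedia list from scratch.
	 */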
3279 	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3280 	ixgbe_add_media_types(sc);
3281 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3282 }
3283 
3284 /*
3285  * External PHY interrupts handler
3286  */
3287 void
3288 ixgbe_handle_phy(struct ix_softc *sc)
3289 {
3290 	struct ixgbe_hw *hw = &sc->hw;
3291 	int error;
3292 
3293 	error = hw->phy.ops.handle_lasi(hw);
3294 	if (error == IXGBE_ERR_OVERTEMP)
3295 		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3296 		    "PHY will downshift to lower power state!\n",
3297 		    sc->dev.dv_xname);
3298 	else if (error)
3299 		printf("%s: Error handling LASI interrupt: %d\n",
3300 		    sc->dev.dv_xname, error);
3301 
3302 }
3303 
3304 /**********************************************************************
3305  *
3306  *  Update the board statistics counters.
3307  *
3308  **********************************************************************/
3309 void
3310 ixgbe_update_stats_counters(struct ix_softc *sc)
3311 {
3312 	struct ifnet	*ifp = &sc->arpcom.ac_if;
3313 	struct ixgbe_hw	*hw = &sc->hw;
3314 	uint64_t	total_missed_rx = 0;
3315 #ifdef IX_DEBUG
3316 	uint32_t	missed_rx = 0, bprc, lxon, lxoff, total;
3317 	int		i;
3318 #endif
3319 
3320 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3321 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3322 
3323 #ifdef IX_DEBUG
3324 	for (i = 0; i < 8; i++) {
3325 		uint32_t mp;
3326 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3327 		/* missed_rx tallies misses for the gprc workaround */
3328 		missed_rx += mp;
3329 		/* global total per queue */
3330 		sc->stats.mpc[i] += mp;
3331 		/* running comprehensive total for stats display */
3332 		total_missed_rx += sc->stats.mpc[i];
3333 		if (hw->mac.type == ixgbe_mac_82598EB)
3334 			sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3335 	}
3336 
3337 	/* Hardware workaround, gprc counts missed packets */
3338 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3339 	sc->stats.gprc -= missed_rx;
3340 
3341 	if (hw->mac.type != ixgbe_mac_82598EB) {
3342 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3343 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3344 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3345 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3346 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3347 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3348 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3349 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3350 	} else {
3351 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3352 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3353 		/* 82598 only has a counter in the high register */
3354 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3355 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3356 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3357 	}
3358 
3359 	/*
3360 	 * Workaround: mprc hardware is incorrectly counting
3361 	 * broadcasts, so for now we subtract those.
3362 	 */
3363 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3364 	sc->stats.bprc += bprc;
3365 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3366 	if (hw->mac.type == ixgbe_mac_82598EB)
3367 		sc->stats.mprc -= bprc;
3368 
3369 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3370 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3371 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3372 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3373 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3374 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3375 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3376 
3377 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3378 	sc->stats.lxontxc += lxon;
3379 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3380 	sc->stats.lxofftxc += lxoff;
3381 	total = lxon + lxoff;
3382 
3383 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3384 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3385 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3386 	sc->stats.gptc -= total;
3387 	sc->stats.mptc -= total;
3388 	sc->stats.ptc64 -= total;
3389 	sc->stats.gotc -= total * ETHER_MIN_LEN;
3390 
3391 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3392 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3393 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3394 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3395 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3396 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3397 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3398 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3399 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3400 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3401 #endif
3402 
3403 	/* Fill out the OS statistics structure */
3404 	ifp->if_collisions = 0;
3405 	ifp->if_oerrors = sc->watchdog_events;
3406 	ifp->if_ierrors = total_missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3407 }
3408 
3409 #ifdef IX_DEBUG
3410 /**********************************************************************
3411  *
3412  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3413  *  This routine provides a way to take a look at important statistics
3414  *  maintained by the driver and hardware.
3415  *
3416  **********************************************************************/
3417 void
3418 ixgbe_print_hw_stats(struct ix_softc * sc)
3419 {
3420 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3421 
3422 	printf("%s: missed pkts %llu, rx len errs %llu, crc errs %llu, "
3423 	    "dropped pkts %lu, watchdog timeouts %ld, "
3424 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3425 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3426 	    "tso tx %lu\n",
3427 	    ifp->if_xname,
3428 	    (long long)sc->stats.mpc[0],
3429 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3430 	    (long long)sc->stats.crcerrs,
3431 	    sc->dropped_pkts,
3432 	    sc->watchdog_events,
3433 	    (long long)sc->stats.lxonrxc,
3434 	    (long long)sc->stats.lxontxc,
3435 	    (long long)sc->stats.lxoffrxc,
3436 	    (long long)sc->stats.lxofftxc,
3437 	    (long long)sc->stats.tpr,
3438 	    (long long)sc->stats.gprc,
3439 	    (long long)sc->stats.gptc,
3440 	    sc->tso_tx);
3441 }
3442 #endif
3443