xref: /openbsd-src/sys/dev/pci/if_ix.c (revision d59bb9942320b767f2a19aaa7690c8c6e30b724c)
1 /*	$OpenBSD: if_ix.c,v 1.150 2017/01/24 03:57:35 dlg Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36 
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 /* char ixgbe_driver_version[] = "2.5.13"; */
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by probe to select devices to load on
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
88 };
89 
90 /*********************************************************************
91  *  Function prototypes
92  *********************************************************************/
93 int	ixgbe_probe(struct device *, void *, void *);
94 void	ixgbe_attach(struct device *, struct device *, void *);
95 int	ixgbe_detach(struct device *, int);
96 void	ixgbe_start(struct ifqueue *);
97 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
98 int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
99 void	ixgbe_watchdog(struct ifnet *);
100 void	ixgbe_init(void *);
101 void	ixgbe_stop(void *);
102 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
103 int	ixgbe_media_change(struct ifnet *);
104 void	ixgbe_identify_hardware(struct ix_softc *);
105 int	ixgbe_allocate_pci_resources(struct ix_softc *);
106 int	ixgbe_allocate_legacy(struct ix_softc *);
107 int	ixgbe_allocate_queues(struct ix_softc *);
108 void	ixgbe_free_pci_resources(struct ix_softc *);
109 void	ixgbe_local_timer(void *);
110 void	ixgbe_setup_interface(struct ix_softc *);
111 void	ixgbe_config_gpie(struct ix_softc *);
112 void	ixgbe_config_delay_values(struct ix_softc *);
113 void	ixgbe_add_media_types(struct ix_softc *);
114 void	ixgbe_config_link(struct ix_softc *);
115 
116 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
117 int	ixgbe_setup_transmit_structures(struct ix_softc *);
118 int	ixgbe_setup_transmit_ring(struct tx_ring *);
119 void	ixgbe_initialize_transmit_units(struct ix_softc *);
120 void	ixgbe_free_transmit_structures(struct ix_softc *);
121 void	ixgbe_free_transmit_buffers(struct tx_ring *);
122 
123 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
124 int	ixgbe_setup_receive_structures(struct ix_softc *);
125 int	ixgbe_setup_receive_ring(struct rx_ring *);
126 void	ixgbe_initialize_receive_units(struct ix_softc *);
127 void	ixgbe_free_receive_structures(struct ix_softc *);
128 void	ixgbe_free_receive_buffers(struct rx_ring *);
129 void	ixgbe_initialize_rss_mapping(struct ix_softc *);
130 int	ixgbe_rxfill(struct rx_ring *);
131 void	ixgbe_rxrefill(void *);
132 
133 void	ixgbe_enable_intr(struct ix_softc *);
134 void	ixgbe_disable_intr(struct ix_softc *);
135 void	ixgbe_update_stats_counters(struct ix_softc *);
136 int	ixgbe_txeof(struct tx_ring *);
137 int	ixgbe_rxeof(struct ix_queue *);
138 void	ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
139 void	ixgbe_iff(struct ix_softc *);
140 #ifdef IX_DEBUG
141 void	ixgbe_print_hw_stats(struct ix_softc *);
142 #endif
143 void	ixgbe_update_link_status(struct ix_softc *);
144 int	ixgbe_get_buf(struct rx_ring *, int);
145 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
146 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
147 		    struct ixgbe_dma_alloc *, int);
148 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
149 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
150 	    uint32_t *);
151 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
152 	    uint32_t *);
153 void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
154 void	ixgbe_configure_ivars(struct ix_softc *);
155 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
156 
157 void	ixgbe_setup_vlan_hw_support(struct ix_softc *);
158 
159 /* Support for pluggable optic modules */
160 void	ixgbe_setup_optics(struct ix_softc *);
161 void	ixgbe_handle_mod(struct ix_softc *);
162 void	ixgbe_handle_msf(struct ix_softc *);
163 void	ixgbe_handle_phy(struct ix_softc *);
164 
165 /* Legacy (single vector) interrupt handler */
166 int	ixgbe_intr(void *);
167 void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
168 void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
169 void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);
170 
171 /*********************************************************************
172  *  OpenBSD Device Interface Entry Points
173  *********************************************************************/
174 
175 struct cfdriver ix_cd = {
176 	NULL, "ix", DV_IFNET
177 };
178 
179 struct cfattach ix_ca = {
180 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
181 };
182 
183 int ixgbe_smart_speed = ixgbe_smart_speed_on;
184 
185 /*********************************************************************
186  *  Device identification routine
187  *
188  *  ixgbe_probe determines whether the driver should be loaded on the
189  *  adapter, based on the PCI vendor/device ID of the adapter.
190  *
191  *  return 0 on success, positive on failure
192  *  Returns nonzero if the adapter is supported, zero otherwise.
193 
194 int
195 ixgbe_probe(struct device *parent, void *match, void *aux)
196 {
197 	INIT_DEBUGOUT("ixgbe_probe: begin");
198 
199 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
200 	    nitems(ixgbe_devices)));
201 }
202 
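/*
 * Illustrative sketch only, not compiled into the driver: roughly what
 * the pci_matchbyid() call above does with the ixgbe_devices[] table.
 * The helper name ix_example_match() is made up for this sketch.
 */
#if 0
int
ix_example_match(struct pci_attach_args *pa)
{
	int i;

	for (i = 0; i < nitems(ixgbe_devices); i++) {
		if (PCI_VENDOR(pa->pa_id) == ixgbe_devices[i].pm_vid &&
		    PCI_PRODUCT(pa->pa_id) == ixgbe_devices[i].pm_pid)
			return (1);	/* supported adapter, attach */
	}
	return (0);			/* not one of ours */
}
#endif
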
203 /*********************************************************************
204  *  Device initialization routine
205  *
206  *  The attach entry point is called when the driver is being loaded.
207  *  This routine identifies the type of hardware, allocates all resources
208  *  and initializes the hardware.
209  *
210  *  This function returns no value; attach failures are reported via printf.
211  *********************************************************************/
212 
213 void
214 ixgbe_attach(struct device *parent, struct device *self, void *aux)
215 {
216 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
217 	struct ix_softc		*sc = (struct ix_softc *)self;
218 	int			 error = 0;
219 	uint16_t		 csum;
220 	uint32_t			 ctrl_ext;
221 	struct ixgbe_hw		*hw = &sc->hw;
222 
223 	INIT_DEBUGOUT("ixgbe_attach: begin");
224 
225 	sc->osdep.os_sc = sc;
226 	sc->osdep.os_pa = *pa;
227 
228 	/* Set up the timer callout */
229 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
230 	timeout_set(&sc->rx_refill, ixgbe_rxrefill, sc);
231 
232 	/* Determine hardware revision */
233 	ixgbe_identify_hardware(sc);
234 
235 	/* Default descriptor counts for the TX and RX rings */
236 	sc->num_tx_desc = DEFAULT_TXD;
237 	sc->num_rx_desc = DEFAULT_RXD;
238 
239 	/* Do base PCI setup - map BAR0 */
240 	if (ixgbe_allocate_pci_resources(sc))
241 		goto err_out;
242 
243 	/* Allocate our TX/RX Queues */
244 	if (ixgbe_allocate_queues(sc))
245 		goto err_out;
246 
247 	/* Allocate multicast array memory. */
248 	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
249 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
250 	if (sc->mta == NULL) {
251 		printf(": Can not allocate multicast setup array\n");
252 		goto err_late;
253 	}
254 
255 	/* Initialize the shared code */
256 	error = ixgbe_init_shared_code(hw);
257 	if (error) {
258 		printf(": Unable to initialize the shared code\n");
259 		goto err_late;
260 	}
261 
262 	/* Make sure we have a good EEPROM before we read from it */
263 	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
264 		printf(": The EEPROM Checksum Is Not Valid\n");
265 		goto err_late;
266 	}
267 
268 	error = ixgbe_init_hw(hw);
269 	if (error == IXGBE_ERR_EEPROM_VERSION) {
270 		printf(": This device is a pre-production adapter/"
271 		    "LOM.  Please be aware there may be issues associated "
272 		    "with your hardware.\nIf you are experiencing problems "
273 		    "please contact your Intel or hardware representative "
274 		    "who provided you with this hardware.\n");
275 	} else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
276 	    error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
277 		printf(": Hardware Initialization Failure\n");
278 		goto err_late;
279 	}
280 
281 	/* Detect and set physical type */
282 	ixgbe_setup_optics(sc);
283 
284 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
285 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
286 
287 	error = ixgbe_allocate_legacy(sc);
288 	if (error)
289 		goto err_late;
290 
291 	/* Enable the optics for 82599 SFP+ fiber */
292 	if (sc->hw.mac.ops.enable_tx_laser)
293 		sc->hw.mac.ops.enable_tx_laser(&sc->hw);
294 
295 	/* Enable power to the phy */
296 	if (hw->phy.ops.set_phy_power)
297 		hw->phy.ops.set_phy_power(&sc->hw, TRUE);
298 
299 	/* Setup OS specific network interface */
300 	ixgbe_setup_interface(sc);
301 
302 	/* Initialize statistics */
303 	ixgbe_update_stats_counters(sc);
304 
305 	/* Get the PCI-E bus info and determine LAN ID */
306 	hw->mac.ops.get_bus_info(hw);
307 
308 	/* Set an initial default flow control value */
309 	sc->fc = ixgbe_fc_full;
310 
311 	/* let hardware know driver is loaded */
312 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
313 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
314 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
315 
316 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
317 
318 	INIT_DEBUGOUT("ixgbe_attach: end");
319 	return;
320 
321 err_late:
322 	ixgbe_free_transmit_structures(sc);
323 	ixgbe_free_receive_structures(sc);
324 err_out:
325 	ixgbe_free_pci_resources(sc);
326 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
327 	    MAX_NUM_MULTICAST_ADDRESSES);
328 }
329 
330 /*********************************************************************
331  *  Device removal routine
332  *
333  *  The detach entry point is called when the driver is being removed.
334  *  This routine stops the adapter and deallocates all the resources
335  *  that were allocated for driver operation.
336  *
337  *  return 0 on success, positive on failure
338  *********************************************************************/
339 
340 int
341 ixgbe_detach(struct device *self, int flags)
342 {
343 	struct ix_softc *sc = (struct ix_softc *)self;
344 	struct ifnet *ifp = &sc->arpcom.ac_if;
345 	uint32_t	ctrl_ext;
346 
347 	INIT_DEBUGOUT("ixgbe_detach: begin");
348 
349 	ixgbe_stop(sc);
350 
351 	/* let hardware know driver is unloading */
352 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
353 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
354 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
355 
356 	ether_ifdetach(ifp);
357 	if_detach(ifp);
358 
359 	timeout_del(&sc->timer);
360 	timeout_del(&sc->rx_refill);
361 	ixgbe_free_pci_resources(sc);
362 
363 	ixgbe_free_transmit_structures(sc);
364 	ixgbe_free_receive_structures(sc);
365 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
366 	    MAX_NUM_MULTICAST_ADDRESSES);
367 
368 	return (0);
369 }
370 
371 /*********************************************************************
372  *  Transmit entry point
373  *
374  *  ixgbe_start is called by the stack to initiate a transmit.
375  *  The driver will remain in this routine as long as there are
376  *  packets to transmit and transmit resources are available.
377  *  If transmit resources are not available, the stack is notified
378  *  and the packet is requeued.
379  **********************************************************************/
380 
381 void
382 ixgbe_start(struct ifqueue *ifq)
383 {
384 	struct ifnet		*ifp = ifq->ifq_if;
385 	struct ix_softc		*sc = ifp->if_softc;
386 	struct tx_ring		*txr = sc->tx_rings;
387 	struct mbuf  		*m_head;
388 	int			 post = 0;
389 
390 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(ifq))
391 		return;
392 	if (!sc->link_up)
393 		return;
394 
395 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, 0,
396 	    txr->txdma.dma_map->dm_mapsize,
397 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
398 
399 	for (;;) {
400 		/* Check that we have the minimal number of TX descriptors. */
401 		if (txr->tx_avail <= IXGBE_TX_OP_THRESHOLD) {
402 			ifq_set_oactive(ifq);
403 			break;
404 		}
405 
406 		m_head = ifq_dequeue(ifq);
407 		if (m_head == NULL)
408 			break;
409 
410 		if (ixgbe_encap(txr, m_head)) {
411 			m_freem(m_head);
412 			continue;
413 		}
414 
415 #if NBPFILTER > 0
416 		if (ifp->if_bpf)
417 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
418 #endif
419 
420 		/* Set timeout in case hardware has problems transmitting */
421 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
422 		ifp->if_timer = IXGBE_TX_TIMEOUT;
423 
424 		post = 1;
425 	}
426 
427 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
428 	    0, txr->txdma.dma_map->dm_mapsize,
429 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
430 
431 	/*
432 	 * Advance the Transmit Descriptor Tail (Tdt), this tells the
433 	 * hardware that this frame is available to transmit.
434 	 */
435 	if (post)
436 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
437 		    txr->next_avail_desc);
438 }
439 
440 /*********************************************************************
441  *  Ioctl entry point
442  *
443  *  ixgbe_ioctl is called when the user wants to configure the
444  *  interface.
445  *
446  *  return 0 on success, positive on failure
447  **********************************************************************/
448 
449 int
450 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
451 {
452 	struct ix_softc	*sc = ifp->if_softc;
453 	struct ifreq	*ifr = (struct ifreq *) data;
454 	int		s, error = 0;
455 
456 	s = splnet();
457 
458 	switch (command) {
459 	case SIOCSIFADDR:
460 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
461 		ifp->if_flags |= IFF_UP;
462 		if (!(ifp->if_flags & IFF_RUNNING))
463 			ixgbe_init(sc);
464 		break;
465 
466 	case SIOCSIFFLAGS:
467 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
468 		if (ifp->if_flags & IFF_UP) {
469 			if (ifp->if_flags & IFF_RUNNING)
470 				error = ENETRESET;
471 			else
472 				ixgbe_init(sc);
473 		} else {
474 			if (ifp->if_flags & IFF_RUNNING)
475 				ixgbe_stop(sc);
476 		}
477 		break;
478 
479 	case SIOCSIFMEDIA:
480 	case SIOCGIFMEDIA:
481 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
482 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
483 		break;
484 
485 	case SIOCGIFRXR:
486 		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
487 		break;
488 
489 	default:
490 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
491 	}
492 
493 	if (error == ENETRESET) {
494 		if (ifp->if_flags & IFF_RUNNING) {
495 			ixgbe_disable_intr(sc);
496 			ixgbe_iff(sc);
497 			ixgbe_enable_intr(sc);
498 		}
499 		error = 0;
500 	}
501 
502 	splx(s);
503 	return (error);
504 }
505 
506 int
507 ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
508 {
509 	struct if_rxring_info *ifr, ifr1;
510 	struct rx_ring *rxr;
511 	int error, i;
512 	u_int n = 0;
513 
514 	if (sc->num_queues > 1) {
515 		if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
516 		    M_WAITOK | M_ZERO)) == NULL)
517 			return (ENOMEM);
518 	} else
519 		ifr = &ifr1;
520 
521 	for (i = 0; i < sc->num_queues; i++) {
522 		rxr = &sc->rx_rings[i];
523 		ifr[n].ifr_size = sc->rx_mbuf_sz;
524 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
525 		ifr[n].ifr_info = rxr->rx_ring;
526 		n++;
527 	}
528 
529 	error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
530 
531 	if (sc->num_queues > 1)
532 		free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
533 	return (error);
534 }
535 
536 /*********************************************************************
537  *  Watchdog entry point
538  *
539  **********************************************************************/
540 
541 void
542 ixgbe_watchdog(struct ifnet * ifp)
543 {
544 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
545 	struct tx_ring *txr = sc->tx_rings;
546 	struct ixgbe_hw *hw = &sc->hw;
547 	int		tx_hang = FALSE;
548 	int		i;
549 
550 	/*
551 	 * The timer is set to IXGBE_TX_TIMEOUT whenever ixgbe_start() queues
552 	 * a packet.  Any time all descriptors are clean the timer is set to 0.
553 	 */
554 	for (i = 0; i < sc->num_queues; i++, txr++) {
555 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
556 			continue;
557 		else {
558 			tx_hang = TRUE;
559 			break;
560 		}
561 	}
562 	if (tx_hang == FALSE)
563 		return;
564 
565 	/*
566 	 * If we are in this routine because of pause frames, then don't
567 	 * reset the hardware.
568 	 */
569 	if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
570 		for (i = 0; i < sc->num_queues; i++, txr++)
571 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
572 		ifp->if_timer = IXGBE_TX_TIMEOUT;
573 		return;
574 	}
575 
576 
577 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
578 	for (i = 0; i < sc->num_queues; i++, txr++) {
579 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
580 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
581 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
582 		printf("%s: TX(%d) desc avail = %d, Next TX to Clean = %d\n", ifp->if_xname,
583 		    i, txr->tx_avail, txr->next_to_clean);
584 	}
585 	ifp->if_flags &= ~IFF_RUNNING;
586 	sc->watchdog_events++;
587 
588 	ixgbe_init(sc);
589 }
590 
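/*
 * Illustrative sketch only, not compiled: the per-ring watchdog
 * lifecycle that ixgbe_watchdog() above relies on.  ixgbe_start() arms
 * the counters, the stack calls if_watchdog once per second while
 * ifp->if_timer is nonzero, and the TX completion path sets the
 * counter back to 0 once all descriptors are clean.
 */
#if 0
/* ixgbe_start(): a frame was queued, arm the watchdog */
txr->watchdog_timer = IXGBE_TX_TIMEOUT;
ifp->if_timer = IXGBE_TX_TIMEOUT;

/* ixgbe_watchdog(), once per second: count down, flag a hang at zero */
if (txr->watchdog_timer != 0 && --txr->watchdog_timer == 0)
	tx_hang = TRUE;
#endif
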
591 /*********************************************************************
592  *  Init entry point
593  *
594  *  This routine is used in two ways. It is used by the stack as
595  *  the init entry point in the network interface structure. It is also
596  *  used by the driver as a hw/sw initialization routine to get to a
597  *  consistent state.
598  *
599  *  This function returns no value; failures are reported via printf.
600  **********************************************************************/
601 #define IXGBE_MHADD_MFS_SHIFT 16
602 
603 void
604 ixgbe_init(void *arg)
605 {
606 	struct ix_softc	*sc = (struct ix_softc *)arg;
607 	struct ifnet	*ifp = &sc->arpcom.ac_if;
608 	struct rx_ring	*rxr = sc->rx_rings;
609 	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, itr;
610 	int		 i, s, err;
611 
612 	INIT_DEBUGOUT("ixgbe_init: begin");
613 
614 	s = splnet();
615 
616 	ixgbe_stop(sc);
617 
618 	/* reprogram the RAR[0] in case user changed it. */
619 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
620 
621 	/* Get the latest mac address, User can use a LAA */
622 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
623 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
624 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
625 	sc->hw.addr_ctrl.rar_used_count = 1;
626 
627 	/* Prepare transmit descriptors and buffers */
628 	if (ixgbe_setup_transmit_structures(sc)) {
629 		printf("%s: Could not setup transmit structures\n",
630 		    ifp->if_xname);
631 		ixgbe_stop(sc);
632 		splx(s);
633 		return;
634 	}
635 
636 	ixgbe_init_hw(&sc->hw);
637 	ixgbe_initialize_transmit_units(sc);
638 
639 	/* Use 2k clusters, even for jumbo frames */
640 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
641 
642 	/* Prepare receive descriptors and buffers */
643 	if (ixgbe_setup_receive_structures(sc)) {
644 		printf("%s: Could not setup receive structures\n",
645 		    ifp->if_xname);
646 		ixgbe_stop(sc);
647 		splx(s);
648 		return;
649 	}
650 
651 	/* Configure RX settings */
652 	ixgbe_initialize_receive_units(sc);
653 
654 	/* Enable SDP & MSIX interrupts based on adapter */
655 	ixgbe_config_gpie(sc);
656 
657 	/* Program promiscuous mode and multicast filters. */
658 	ixgbe_iff(sc);
659 
660 	/* Set MRU size */
661 	mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
662 	mhadd &= ~IXGBE_MHADD_MFS_MASK;
663 	mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
664 	IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
665 
666 	/* Now enable all the queues */
667 	for (i = 0; i < sc->num_queues; i++) {
668 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
669 		txdctl |= IXGBE_TXDCTL_ENABLE;
670 		/* Set WTHRESH to 8, burst writeback */
671 		txdctl |= (8 << 16);
672 		/*
673 		 * When the internal queue falls below PTHRESH (16),
674 		 * start prefetching as long as there are at least
675 		 * HTHRESH (1) buffers ready.
676 		 */
677 		txdctl |= (16 << 0) | (1 << 8);
678 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
679 	}
680 
681 	for (i = 0; i < sc->num_queues; i++) {
682 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
683 		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
684 			/*
685 			 * PTHRESH = 21
686 			 * HTHRESH = 4
687 			 * WTHRESH = 8
688 			 */
689 			rxdctl &= ~0x3FFFFF;
690 			rxdctl |= 0x080420;
691 		}
692 		rxdctl |= IXGBE_RXDCTL_ENABLE;
693 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
694 		for (k = 0; k < 10; k++) {
695 			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
696 			    IXGBE_RXDCTL_ENABLE)
697 				break;
698 			else
699 				msec_delay(1);
700 		}
701 		IXGBE_WRITE_FLUSH(&sc->hw);
702 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
703 	}
704 
705 	/* Set up VLAN support and filter */
706 	ixgbe_setup_vlan_hw_support(sc);
707 
708 	/* Enable Receive engine */
709 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
710 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
711 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
712 	rxctrl |= IXGBE_RXCTRL_RXEN;
713 	sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
714 
715 	timeout_add_sec(&sc->timer, 1);
716 
717 	/* Set up MSI/X routing */
718 	if (sc->msix > 1) {
719 		ixgbe_configure_ivars(sc);
720 		/* Set up auto-mask */
721 		if (sc->hw.mac.type == ixgbe_mac_82598EB)
722 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
723 		else {
724 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
725 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
726 		}
727 	} else {  /* Simple settings for Legacy/MSI */
728 		ixgbe_set_ivar(sc, 0, 0, 0);
729 		ixgbe_set_ivar(sc, 0, 0, 1);
730 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
731 	}
732 
733 	/* Check on any SFP devices that need to be kick-started */
734 	if (sc->hw.phy.type == ixgbe_phy_none) {
735 		err = sc->hw.phy.ops.identify(&sc->hw);
736 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
737 			printf("Unsupported SFP+ module type was detected.\n");
738 			splx(s);
739 			return;
740 		}
741 	}
742 
743 	/* Setup interrupt moderation */
744 	itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
745 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
746 		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
747 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
748 
749 	/* Enable power to the phy */
750 	if (sc->hw.phy.ops.set_phy_power)
751 		sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
752 
753 	/* Config/Enable Link */
754 	ixgbe_config_link(sc);
755 
756 	/* Hardware Packet Buffer & Flow Control setup */
757 	ixgbe_config_delay_values(sc);
758 
759 	/* Initialize the FC settings */
760 	sc->hw.mac.ops.start_hw(&sc->hw);
761 
762 	/* And now turn on interrupts */
763 	ixgbe_enable_intr(sc);
764 
765 	/* Now inform the stack we're ready */
766 	ifp->if_flags |= IFF_RUNNING;
767 	ifq_clr_oactive(&ifp->if_snd);
768 
769 	splx(s);
770 }
771 
772 void
773 ixgbe_config_gpie(struct ix_softc *sc)
774 {
775 	struct ixgbe_hw	*hw = &sc->hw;
776 	uint32_t gpie;
777 
778 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
779 
780 	/* Fan Failure Interrupt */
781 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
782 		gpie |= IXGBE_SDP1_GPIEN;
783 
784 	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
785 		/* Add for Module detection */
786 		gpie |= IXGBE_SDP2_GPIEN;
787 
788 		/* Media ready */
789 		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
790 			gpie |= IXGBE_SDP1_GPIEN;
791 
792 		/*
793 		 * Set LL interval to max to reduce the number of low latency
794 		 * interrupts hitting the card when the ring is getting full.
795 		 */
796 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
797 	}
798 
799 	if (sc->hw.mac.type == ixgbe_mac_X540 ||
800 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
801 	    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
802 		/*
803 		 * Thermal Failure Detection (X540)
804 		 * Link Detection (X552 SFP+, X552/X557-AT)
805 		 */
806 		gpie |= IXGBE_SDP0_GPIEN_X540;
807 
808 		/*
809 		 * Set LL interval to max to reduce the number of low latency
810 		 * interrupts hitting the card when the ring is getting full.
811 		 */
812 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
813 	}
814 
815 	if (sc->msix > 1) {
816 		/* Enable Enhanced MSIX mode */
817 		gpie |= IXGBE_GPIE_MSIX_MODE;
818 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
819 		    IXGBE_GPIE_OCD;
820 	}
821 
822 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
823 }
824 
825 /*
826  * Requires sc->max_frame_size to be set.
827  */
828 void
829 ixgbe_config_delay_values(struct ix_softc *sc)
830 {
831 	struct ixgbe_hw *hw = &sc->hw;
832 	uint32_t rxpb, frame, size, tmp;
833 
834 	frame = sc->max_frame_size;
835 
836 	/* Calculate High Water */
837 	switch (hw->mac.type) {
838 	case ixgbe_mac_X540:
839 	case ixgbe_mac_X550:
840 	case ixgbe_mac_X550EM_x:
841 		tmp = IXGBE_DV_X540(frame, frame);
842 		break;
843 	default:
844 		tmp = IXGBE_DV(frame, frame);
845 		break;
846 	}
847 	size = IXGBE_BT2KB(tmp);
848 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
849 	hw->fc.high_water[0] = rxpb - size;
850 
851 	/* Now calculate Low Water */
852 	switch (hw->mac.type) {
853 	case ixgbe_mac_X540:
854 	case ixgbe_mac_X550:
855 	case ixgbe_mac_X550EM_x:
856 		tmp = IXGBE_LOW_DV_X540(frame);
857 		break;
858 	default:
859 		tmp = IXGBE_LOW_DV(frame);
860 		break;
861 	}
862 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
863 
864 	hw->fc.requested_mode = sc->fc;
865 	hw->fc.pause_time = IXGBE_FC_PAUSE;
866 	hw->fc.send_xon = TRUE;
867 }
868 
869 /*
870  * MSIX Interrupt Handlers
871  */
872 void
873 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
874 {
875 	uint64_t queue = 1ULL << vector;
876 	uint32_t mask;
877 
878 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
879 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
880 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
881 	} else {
882 		mask = (queue & 0xFFFFFFFF);
883 		if (mask)
884 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
885 		mask = (queue >> 32);
886 		if (mask)
887 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
888 	}
889 }
890 
891 void
892 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
893 {
894 	uint64_t queue = 1ULL << vector;
895 	uint32_t mask;
896 
897 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
898 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
899 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
900 	} else {
901 		mask = (queue & 0xFFFFFFFF);
902 		if (mask)
903 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
904 		mask = (queue >> 32);
905 		if (mask)
906 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
907 	}
908 }
909 
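/*
 * Illustrative sketch only, not compiled: how the 64-bit queue bitmask
 * used above maps onto the interrupt mask registers.  The 82598 keeps
 * its queue bits in a single EIMS/EIMC register; newer MACs split the
 * mask across the _EX(0) (vectors 0-31) and _EX(1) (vectors 32-63)
 * register pair.
 */
#if 0
uint64_t queue = 1ULL << vector;

if (vector < 32)
	IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), (uint32_t)queue);
else
	IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), (uint32_t)(queue >> 32));
#endif
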
910 /*********************************************************************
911  *
912  *  Legacy Interrupt Service routine
913  *
914  **********************************************************************/
915 
916 int
917 ixgbe_intr(void *arg)
918 {
919 	struct ix_softc	*sc = (struct ix_softc *)arg;
920 	struct ix_queue *que = sc->queues;
921 	struct ifnet	*ifp = &sc->arpcom.ac_if;
922 	struct tx_ring	*txr = sc->tx_rings;
923 	struct ixgbe_hw	*hw = &sc->hw;
924 	uint32_t	 reg_eicr, mod_mask, msf_mask;
925 	int		 i, refill = 0;
926 
927 	reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
928 	if (reg_eicr == 0) {
929 		ixgbe_enable_intr(sc);
930 		return (0);
931 	}
932 
933 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
934 		ixgbe_rxeof(que);
935 		ixgbe_txeof(txr);
936 		refill = 1;
937 	}
938 
939 	if (refill) {
940 		if (ixgbe_rxfill(que->rxr)) {
941 			/* Advance the Rx Queue "Tail Pointer" */
942 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
943 			    que->rxr->last_desc_filled);
944 		} else
945 			timeout_add(&sc->rx_refill, 1);
946 	}
947 
948 	/* Link status change */
949 	if (reg_eicr & IXGBE_EICR_LSC) {
950 		KERNEL_LOCK();
951 		ixgbe_update_link_status(sc);
952 		KERNEL_UNLOCK();
953 		ifq_start(&ifp->if_snd);
954 	}
955 
956 	if (hw->mac.type != ixgbe_mac_82598EB) {
957 		if (reg_eicr & IXGBE_EICR_ECC) {
958 			printf("%s: CRITICAL: ECC ERROR!! "
959 			    "Please Reboot!!\n", sc->dev.dv_xname);
960 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
961 		}
962 		/* Check for over temp condition */
963 		if (reg_eicr & IXGBE_EICR_TS) {
964 			printf("%s: CRITICAL: OVER TEMP!! "
965 			    "PHY IS SHUT DOWN!!\n", ifp->if_xname);
966 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
967 		}
968 	}
969 
970 	/* Pluggable optics-related interrupt */
971 	if (ixgbe_is_sfp(hw)) {
972 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
973 			mod_mask = IXGBE_EICR_GPI_SDP0_X540;
974 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
975 		} else if (hw->mac.type == ixgbe_mac_X540 ||
976 		    hw->mac.type == ixgbe_mac_X550 ||
977 		    hw->mac.type == ixgbe_mac_X550EM_x) {
978 			mod_mask = IXGBE_EICR_GPI_SDP2_X540;
979 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
980 		} else {
981 			mod_mask = IXGBE_EICR_GPI_SDP2;
982 			msf_mask = IXGBE_EICR_GPI_SDP1;
983 		}
984 		if (reg_eicr & mod_mask) {
985 			/* Clear the interrupt */
986 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
987 			KERNEL_LOCK();
988 			ixgbe_handle_mod(sc);
989 			KERNEL_UNLOCK();
990 		} else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
991 		    (reg_eicr & msf_mask)) {
992 			/* Clear the interrupt */
993 			IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
994 			KERNEL_LOCK();
995 			ixgbe_handle_msf(sc);
996 			KERNEL_UNLOCK();
997 		}
998 	}
999 
1000 	/* Check for fan failure */
1001 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1002 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1003 		printf("%s: CRITICAL: FAN FAILURE!! "
1004 		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1005 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1006 	}
1007 
1008 	/* External PHY interrupt */
1009 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1010 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1011 		/* Clear the interrupt */
1012 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1013 		KERNEL_LOCK();
1014 		ixgbe_handle_phy(sc);
1015 		KERNEL_UNLOCK();
1016 	}
1017 
1018 	for (i = 0; i < sc->num_queues; i++, que++)
1019 		ixgbe_enable_queue(sc, que->msix);
1020 
1021 	return (1);
1022 }
1023 
1024 /*********************************************************************
1025  *
1026  *  Media Ioctl callback
1027  *
1028  *  This routine is called whenever the user queries the status of
1029  *  the interface using ifconfig.
1030  *
1031  **********************************************************************/
1032 void
1033 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1034 {
1035 	struct ix_softc *sc = ifp->if_softc;
1036 
1037 	ifmr->ifm_active = IFM_ETHER;
1038 	ifmr->ifm_status = IFM_AVALID;
1039 
1040 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1041 	ixgbe_update_link_status(sc);
1042 
1043 	if (LINK_STATE_IS_UP(ifp->if_link_state)) {
1044 		ifmr->ifm_status |= IFM_ACTIVE;
1045 
1046 		switch (sc->link_speed) {
1047 		case IXGBE_LINK_SPEED_100_FULL:
1048 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1049 			break;
1050 		case IXGBE_LINK_SPEED_1GB_FULL:
1051 			switch (sc->optics) {
1052 			case IFM_10G_SR: /* multi-speed fiber */
1053 				ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1054 				break;
1055 			case IFM_10G_LR: /* multi-speed fiber */
1056 				ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1057 				break;
1058 			default:
1059 				ifmr->ifm_active |= sc->optics | IFM_FDX;
1060 				break;
1061 			}
1062 			break;
1063 		case IXGBE_LINK_SPEED_10GB_FULL:
1064 			ifmr->ifm_active |= sc->optics | IFM_FDX;
1065 			break;
1066 		}
1067 
1068 		switch (sc->hw.fc.current_mode) {
1069 		case ixgbe_fc_tx_pause:
1070 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1071 			break;
1072 		case ixgbe_fc_rx_pause:
1073 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1074 			break;
1075 		case ixgbe_fc_full:
1076 			ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1077 			    IFM_ETH_TXPAUSE;
1078 			break;
1079 		default:
1080 			ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1081 			    IFM_ETH_TXPAUSE);
1082 			break;
1083 		}
1084 	}
1085 }
1086 
1087 /*********************************************************************
1088  *
1089  *  Media Ioctl callback
1090  *
1091  *  This routine is called when the user changes speed/duplex using
1092  *  media/mediaopt option with ifconfig.
1093  *
1094  **********************************************************************/
1095 int
1096 ixgbe_media_change(struct ifnet *ifp)
1097 {
1098 	struct ix_softc	*sc = ifp->if_softc;
1099 	struct ixgbe_hw	*hw = &sc->hw;
1100 	struct ifmedia	*ifm = &sc->media;
1101 	ixgbe_link_speed speed = 0;
1102 
1103 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1104 		return (EINVAL);
1105 
1106 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1107 		return (ENODEV);
1108 
1109 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1110 		case IFM_AUTO:
1111 		case IFM_10G_T:
1112 			speed |= IXGBE_LINK_SPEED_100_FULL;	/* FALLTHROUGH */
1113 		case IFM_10G_SR: /* KR, too */
1114 		case IFM_10G_LR:
1115 		case IFM_10G_CX4: /* KX4 */
1116 			speed |= IXGBE_LINK_SPEED_1GB_FULL;	/* FALLTHROUGH */
1117 		case IFM_10G_SFP_CU:
1118 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1119 			break;
1120 		case IFM_1000_T:
1121 			speed |= IXGBE_LINK_SPEED_100_FULL;	/* FALLTHROUGH */
1122 		case IFM_1000_LX:
1123 		case IFM_1000_SX:
1124 		case IFM_1000_CX: /* KX */
1125 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1126 			break;
1127 		case IFM_100_TX:
1128 			speed |= IXGBE_LINK_SPEED_100_FULL;
1129 			break;
1130 		default:
1131 			return (EINVAL);
1132 	}
1133 
1134 	hw->mac.autotry_restart = TRUE;
1135 	hw->mac.ops.setup_link(hw, speed, TRUE);
1136 
1137 	return (0);
1138 }
1139 
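/*
 * Illustrative note, not compiled: the case fall-through above
 * accumulates every link speed at or below the requested media, so
 * "ifconfig ix0 media autoselect" (IFM_AUTO) ends up advertising
 * 100M, 1G and 10G, while "media 1000baseT" advertises 100M and 1G.
 */
#if 0
speed = IXGBE_LINK_SPEED_100_FULL |	/* IFM_AUTO, then fall through */
    IXGBE_LINK_SPEED_1GB_FULL |		/* the 10G_SR/LR/CX4 cases */
    IXGBE_LINK_SPEED_10GB_FULL;		/* down to IFM_10G_SFP_CU */
#endif
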
1140 /*********************************************************************
1141  *
1142  *  This routine maps the mbufs to tx descriptors, allowing the
1143  *  TX engine to transmit the packets.
1144  *  	- return 0 on success, positive on failure
1145  *
1146  **********************************************************************/
1147 
1148 int
1149 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1150 {
1151 	struct ix_softc *sc = txr->sc;
1152 	uint32_t	olinfo_status = 0, cmd_type_len;
1153 	int             i, j, error;
1154 	int		first, last = 0;
1155 	bus_dmamap_t	map;
1156 	struct ixgbe_tx_buf *txbuf;
1157 	union ixgbe_adv_tx_desc *txd = NULL;
1158 
1159 	/* Basic descriptor defines */
1160 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1161 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1162 
1163 #if NVLAN > 0
1164 	if (m_head->m_flags & M_VLANTAG)
1165 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1166 #endif
1167 
1168 	/*
1169 	 * Important to capture the first descriptor
1170 	 * used because it will contain the index of
1171 	 * the one we tell the hardware to report back
1172 	 */
1173 	first = txr->next_avail_desc;
1174 	txbuf = &txr->tx_buffers[first];
1175 	map = txbuf->map;
1176 
1177 	/*
1178 	 * Map the packet for DMA.
1179 	 */
1180 	error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map, m_head,
1181 	    BUS_DMA_NOWAIT);
1182 	switch (error) {
1183 	case 0:
1184 		break;
1185 	case EFBIG:
1186 		if (m_defrag(m_head, M_NOWAIT) == 0 &&
1187 		    (error = bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1188 		     m_head, BUS_DMA_NOWAIT)) == 0)
1189 			break;
1190 		/* FALLTHROUGH */
1191 	default:
1192 		sc->no_tx_dma_setup++;
1193 		return (error);
1194 	}
1195 
1196 	/* Make certain there are enough descriptors */
1197 	KASSERT(map->dm_nsegs <= txr->tx_avail - 2);
1198 
1199 	/*
1200 	 * Set the appropriate offload context;
1201 	 * it will become the first descriptor.
1202 	 */
1203 	error = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1204 	if (error)
1205 		goto xmit_fail;
1206 
1207 	i = txr->next_avail_desc;
1208 	for (j = 0; j < map->dm_nsegs; j++) {
1209 		txbuf = &txr->tx_buffers[i];
1210 		txd = &txr->tx_base[i];
1211 
1212 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1213 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1214 		    cmd_type_len | map->dm_segs[j].ds_len);
1215 		txd->read.olinfo_status = htole32(olinfo_status);
1216 		last = i; /* descriptor that will get completion IRQ */
1217 
1218 		if (++i == sc->num_tx_desc)
1219 			i = 0;
1220 
1221 		txbuf->m_head = NULL;
1222 		txbuf->eop_index = -1;
1223 	}
1224 
1225 	txd->read.cmd_type_len |=
1226 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1227 
1228 	txbuf->m_head = m_head;
1229 	/*
1230 	 * Here we swap the map so the last descriptor,
1231 	 * which gets the completion interrupt has the
1232 	 * real map, and the first descriptor gets the
1233 	 * unused map from this descriptor.
1234 	 */
1235 	txr->tx_buffers[first].map = txbuf->map;
1236 	txbuf->map = map;
1237 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1238 	    BUS_DMASYNC_PREWRITE);
1239 
1240 	/* Set the index of the descriptor that will be marked done */
1241 	txbuf = &txr->tx_buffers[first];
1242 	txbuf->eop_index = last;
1243 
1244 	membar_producer();
1245 
1246 	atomic_sub_int(&txr->tx_avail, map->dm_nsegs);
1247 	txr->next_avail_desc = i;
1248 
1249 	++txr->tx_packets;
1250 	return (0);
1251 
1252 xmit_fail:
1253 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1254 	return (error);
1255 }
1256 
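/*
 * Illustrative sketch only, not compiled: the DMA map swap done at the
 * end of ixgbe_encap() above.  The map that actually holds the mbuf is
 * parked on the last (EOP) buffer so it can be unloaded when that
 * descriptor's completion interrupt fires, while the first buffer
 * inherits the spare, unused map and remembers where the EOP is.
 */
#if 0
struct ixgbe_tx_buf *firstbuf = &txr->tx_buffers[first];
struct ixgbe_tx_buf *lastbuf = &txr->tx_buffers[last];

lastbuf->m_head = m_head;	/* mbuf is freed at completion time */
firstbuf->map = lastbuf->map;	/* spare map parked on the first buffer */
lastbuf->map = map;		/* loaded map travels with the mbuf */
firstbuf->eop_index = last;	/* where the EOP/RS bit was set */
#endif
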
1257 void
1258 ixgbe_iff(struct ix_softc *sc)
1259 {
1260 	struct ifnet *ifp = &sc->arpcom.ac_if;
1261 	struct arpcom *ac = &sc->arpcom;
1262 	uint32_t	fctrl;
1263 	uint8_t	*mta;
1264 	uint8_t	*update_ptr;
1265 	struct ether_multi *enm;
1266 	struct ether_multistep step;
1267 	int	mcnt = 0;
1268 
1269 	IOCTL_DEBUGOUT("ixgbe_iff: begin");
1270 
1271 	mta = sc->mta;
1272 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1273 	    MAX_NUM_MULTICAST_ADDRESSES);
1274 
1275 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1276 	fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1277 	ifp->if_flags &= ~IFF_ALLMULTI;
1278 
1279 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1280 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1281 		ifp->if_flags |= IFF_ALLMULTI;
1282 		fctrl |= IXGBE_FCTRL_MPE;
1283 		if (ifp->if_flags & IFF_PROMISC)
1284 			fctrl |= IXGBE_FCTRL_UPE;
1285 	} else {
1286 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1287 		while (enm != NULL) {
1288 			bcopy(enm->enm_addrlo,
1289 			    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1290 			    IXGBE_ETH_LENGTH_OF_ADDRESS);
1291 			mcnt++;
1292 
1293 			ETHER_NEXT_MULTI(step, enm);
1294 		}
1295 
1296 		update_ptr = mta;
1297 		sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1298 		    ixgbe_mc_array_itr, TRUE);
1299 	}
1300 
1301 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1302 }
1303 
1304 /*
1305  * This is an iterator function needed by the multicast
1306  * shared code. It simply feeds the shared code routine the
1307  * addresses from the array built in ixgbe_iff(), one by one.
1308  */
1309 uint8_t *
1310 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1311 {
1312 	uint8_t *addr = *update_ptr;
1313 	uint8_t *newptr;
1314 	*vmdq = 0;
1315 
1316 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1317 	*update_ptr = newptr;
1318 	return addr;
1319 }
1320 
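/*
 * Illustrative sketch only, not compiled: how the shared code is
 * expected to drive ixgbe_mc_array_itr().  update_ptr walks the flat
 * mta[] array built in ixgbe_iff(), yielding one 6-byte address per
 * call.
 */
#if 0
uint8_t *update_ptr = sc->mta;
uint32_t vmdq;
int i;

for (i = 0; i < mcnt; i++) {
	uint8_t *addr = ixgbe_mc_array_itr(&sc->hw, &update_ptr, &vmdq);
	/* ... program addr into the next multicast table entry ... */
}
#endif
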
1321 void
1322 ixgbe_local_timer(void *arg)
1323 {
1324 	struct ix_softc *sc = arg;
1325 #ifdef IX_DEBUG
1326 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1327 #endif
1328 	int		 s;
1329 
1330 	s = splnet();
1331 
1332 	ixgbe_update_stats_counters(sc);
1333 
1334 #ifdef IX_DEBUG
1335 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1336 	    (IFF_RUNNING|IFF_DEBUG))
1337 		ixgbe_print_hw_stats(sc);
1338 #endif
1339 
1340 	timeout_add_sec(&sc->timer, 1);
1341 
1342 	splx(s);
1343 }
1344 
1345 void
1346 ixgbe_update_link_status(struct ix_softc *sc)
1347 {
1348 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1349 	int		link_state = LINK_STATE_DOWN;
1350 
1351 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1352 
1353 	ifp->if_baudrate = 0;
1354 	if (sc->link_up) {
1355 		link_state = LINK_STATE_FULL_DUPLEX;
1356 
1357 		switch (sc->link_speed) {
1358 		case IXGBE_LINK_SPEED_UNKNOWN:
1359 			ifp->if_baudrate = 0;
1360 			break;
1361 		case IXGBE_LINK_SPEED_100_FULL:
1362 			ifp->if_baudrate = IF_Mbps(100);
1363 			break;
1364 		case IXGBE_LINK_SPEED_1GB_FULL:
1365 			ifp->if_baudrate = IF_Gbps(1);
1366 			break;
1367 		case IXGBE_LINK_SPEED_10GB_FULL:
1368 			ifp->if_baudrate = IF_Gbps(10);
1369 			break;
1370 		}
1371 
1372 		/* Update any Flow Control changes */
1373 		sc->hw.mac.ops.fc_enable(&sc->hw);
1374 	}
1375 	if (ifp->if_link_state != link_state) {
1376 		ifp->if_link_state = link_state;
1377 		if_link_state_change(ifp);
1378 	}
1379 }
1380 
1381 
1382 /*********************************************************************
1383  *
1384  *  This routine disables all traffic on the adapter by issuing a
1385  *  global reset on the MAC and deallocates TX/RX buffers.
1386  *
1387  **********************************************************************/
1388 
1389 void
1390 ixgbe_stop(void *arg)
1391 {
1392 	struct ix_softc *sc = arg;
1393 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1394 
1395 	/* Tell the stack that the interface is no longer active */
1396 	ifp->if_flags &= ~IFF_RUNNING;
1397 
1398 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1399 	ixgbe_disable_intr(sc);
1400 
1401 	sc->hw.mac.ops.reset_hw(&sc->hw);
1402 	sc->hw.adapter_stopped = FALSE;
1403 	sc->hw.mac.ops.stop_adapter(&sc->hw);
1404 	if (sc->hw.mac.type == ixgbe_mac_82599EB)
1405 		sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1406 	/* Turn off the laser */
1407 	if (sc->hw.mac.ops.disable_tx_laser)
1408 		sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1409 	timeout_del(&sc->timer);
1410 	timeout_del(&sc->rx_refill);
1411 
1412 	/* reprogram the RAR[0] in case user changed it. */
1413 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1414 
1415 	ifq_barrier(&ifp->if_snd);
1416 	intr_barrier(sc->tag);
1417 
1418 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1419 
1420 	ifq_clr_oactive(&ifp->if_snd);
1421 
1422 	/* Should we really clear all structures on stop? */
1423 	ixgbe_free_transmit_structures(sc);
1424 	ixgbe_free_receive_structures(sc);
1425 }
1426 
1427 
1428 /*********************************************************************
1429  *
1430  *  Determine hardware revision.
1431  *
1432  **********************************************************************/
1433 void
1434 ixgbe_identify_hardware(struct ix_softc *sc)
1435 {
1436 	struct ixgbe_osdep	*os = &sc->osdep;
1437 	struct pci_attach_args	*pa = &os->os_pa;
1438 	uint32_t		 reg;
1439 
1440 	/* Save off the information about this board */
1441 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1442 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1443 
1444 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1445 	sc->hw.revision_id = PCI_REVISION(reg);
1446 
1447 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1448 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1449 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1450 
1451 	/* We need this here to set the num_segs below */
1452 	ixgbe_set_mac_type(&sc->hw);
1453 
1454 	/* Pick up the 82599 and VF settings */
1455 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
1456 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1457 	sc->num_segs = IXGBE_82599_SCATTER;
1458 }
1459 
1460 /*********************************************************************
1461  *
1462  *  Determine optic type
1463  *
1464  **********************************************************************/
1465 void
1466 ixgbe_setup_optics(struct ix_softc *sc)
1467 {
1468 	struct ixgbe_hw *hw = &sc->hw;
1469 	int		layer;
1470 
1471 	layer = hw->mac.ops.get_supported_physical_layer(hw);
1472 
1473 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1474 		sc->optics = IFM_10G_T;
1475 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1476 		sc->optics = IFM_1000_T;
1477 	else if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1478 		sc->optics = IFM_100_TX;
1479 	else if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1480 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1481 		sc->optics = IFM_10G_SFP_CU;
1482 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR ||
1483 	    layer & IXGBE_PHYSICAL_LAYER_10GBASE_LRM)
1484 		sc->optics = IFM_10G_LR;
1485 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
1486 		sc->optics = IFM_10G_SR;
1487 	else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1488 	    layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1489 		sc->optics = IFM_10G_CX4;
1490 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1491 		sc->optics = IFM_1000_SX;
1492 	else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_LX)
1493 		sc->optics = IFM_1000_LX;
1494 	else
1495 		sc->optics = IFM_AUTO;
1496 }
1497 
1498 /*********************************************************************
1499  *
1500  *  Setup the Legacy or MSI Interrupt handler
1501  *
1502  **********************************************************************/
1503 int
1504 ixgbe_allocate_legacy(struct ix_softc *sc)
1505 {
1506 	struct ixgbe_osdep	*os = &sc->osdep;
1507 	struct pci_attach_args	*pa = &os->os_pa;
1508 	const char		*intrstr = NULL;
1509 	pci_chipset_tag_t	pc = pa->pa_pc;
1510 	pci_intr_handle_t	ih;
1511 
1512 	/* We allocate a single interrupt resource */
1513 	if (pci_intr_map_msi(pa, &ih) != 0 &&
1514 	    pci_intr_map(pa, &ih) != 0) {
1515 		printf(": couldn't map interrupt\n");
1516 		return (ENXIO);
1517 	}
1518 
1519 #if 0
1520 	/* XXX */
1521 	/* Tasklets for Link, SFP and Multispeed Fiber */
1522 	TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1523 	TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1524 	TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1525 #endif
1526 
1527 	intrstr = pci_intr_string(pc, ih);
1528 	sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1529 	    ixgbe_intr, sc, sc->dev.dv_xname);
1530 	if (sc->tag == NULL) {
1531 		printf(": couldn't establish interrupt");
1532 		if (intrstr != NULL)
1533 			printf(" at %s", intrstr);
1534 		printf("\n");
1535 		return (ENXIO);
1536 	}
1537 	printf(": %s", intrstr);
1538 
1539 	/* For simplicity in the handlers */
1540 	sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1541 
1542 	return (0);
1543 }
1544 
1545 int
1546 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1547 {
1548 	struct ixgbe_osdep	*os = &sc->osdep;
1549 	struct pci_attach_args	*pa = &os->os_pa;
1550 	int			 val;
1551 
1552 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1553 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM ||
1554 	    PCI_MAPREG_MEM_TYPE(val) != PCI_MAPREG_MEM_TYPE_64BIT) {
1555 		printf(": mmba is not mem space\n");
1556 		return (ENXIO);
1557 	}
1558 
1559 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1560 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1561 		printf(": cannot find mem space\n");
1562 		return (ENXIO);
1563 	}
1564 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1565 
1566 	/* Legacy defaults */
1567 	sc->num_queues = 1;
1568 	sc->hw.back = os;
1569 
1570 #ifdef notyet
1571 	/* Now setup MSI or MSI/X, return us the number of supported vectors. */
1572 	sc->msix = ixgbe_setup_msix(sc);
1573 #endif
1574 
1575 	return (0);
1576 }
1577 
1578 void
1579 ixgbe_free_pci_resources(struct ix_softc * sc)
1580 {
1581 	struct ixgbe_osdep	*os = &sc->osdep;
1582 	struct pci_attach_args	*pa = &os->os_pa;
1583 	struct ix_queue *que = sc->queues;
1584 	int i;
1585 
1586 	/* Release all msix queue resources: */
1587 	for (i = 0; i < sc->num_queues; i++, que++) {
1588 		if (que->tag)
1589 			pci_intr_disestablish(pa->pa_pc, que->tag);
1590 		que->tag = NULL;
1591 	}
1592 
1593 	if (sc->tag)
1594 		pci_intr_disestablish(pa->pa_pc, sc->tag);
1595 	sc->tag = NULL;
1596 	if (os->os_membase != 0)
1597 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1598 	os->os_membase = 0;
1599 }
1600 
1601 /*********************************************************************
1602  *
1603  *  Setup networking device structure and register an interface.
1604  *
1605  **********************************************************************/
1606 void
1607 ixgbe_setup_interface(struct ix_softc *sc)
1608 {
1609 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1610 
1611 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1612 	ifp->if_softc = sc;
1613 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1614 	ifp->if_xflags = IFXF_MPSAFE;
1615 	ifp->if_ioctl = ixgbe_ioctl;
1616 	ifp->if_qstart = ixgbe_start;
1617 	ifp->if_timer = 0;
1618 	ifp->if_watchdog = ixgbe_watchdog;
1619 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1620 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1621 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1622 
1623 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1624 
1625 #if NVLAN > 0
1626 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1627 #endif
1628 
1629 #ifdef IX_CSUM_OFFLOAD
1630 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1631 #endif
1632 
1633 	/*
1634 	 * Specify the media types supported by this sc and register
1635 	 * callbacks to update media and link information
1636 	 */
1637 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1638 	    ixgbe_media_status);
1639 	ixgbe_add_media_types(sc);
1640 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1641 
1642 	if_attach(ifp);
1643 	ether_ifattach(ifp);
1644 
1645 	sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1646 }
1647 
1648 void
1649 ixgbe_add_media_types(struct ix_softc *sc)
1650 {
1651 	struct ixgbe_hw	*hw = &sc->hw;
1652 	int		layer;
1653 
1654 	layer = hw->mac.ops.get_supported_physical_layer(hw);
1655 
1656 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1657 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1658 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1659 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1660 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1661 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1662 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1663 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1664 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1665 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1666 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1667 		if (hw->phy.multispeed_fiber)
1668 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1669 			    NULL);
1670 	}
1671 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1672 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1673 		if (hw->phy.multispeed_fiber)
1674 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1675 			    NULL);
1676 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1677 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1678 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1679 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1680 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1681 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1682 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1683 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1684 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1685 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
1686 
1687 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1688 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
1689 		    NULL);
1690 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1691 	}
1692 
1693 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1694 }
1695 
1696 void
1697 ixgbe_config_link(struct ix_softc *sc)
1698 {
1699 	uint32_t	autoneg, err = 0;
1700 	bool		negotiate;
1701 
1702 	if (ixgbe_is_sfp(&sc->hw)) {
1703 		if (sc->hw.phy.multispeed_fiber) {
1704 			sc->hw.mac.ops.setup_sfp(&sc->hw);
1705 			if (sc->hw.mac.ops.enable_tx_laser)
1706 				sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1707 			ixgbe_handle_msf(sc);
1708 		} else
1709 			ixgbe_handle_mod(sc);
1710 	} else {
1711 		if (sc->hw.mac.ops.check_link)
1712 			err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1713 			    &sc->link_up, FALSE);
1714 		if (err)
1715 			return;
1716 		autoneg = sc->hw.phy.autoneg_advertised;
1717 		if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1718 			err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1719 			    &autoneg, &negotiate);
1720 		if (err)
1721 			return;
1722 		if (sc->hw.mac.ops.setup_link)
1723 			sc->hw.mac.ops.setup_link(&sc->hw,
1724 			    autoneg, sc->link_up);
1725 	}
1726 }
1727 
1728 /********************************************************************
1729  * Manage DMA'able memory.
1730  *******************************************************************/
1731 int
1732 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1733 		struct ixgbe_dma_alloc *dma, int mapflags)
1734 {
1735 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1736 	struct ixgbe_osdep	*os = &sc->osdep;
1737 	int			 r;
1738 
1739 	dma->dma_tag = os->os_pa.pa_dmat;
1740 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1741 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1742 	if (r != 0) {
1743 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1744 		       "error %u\n", ifp->if_xname, r);
1745 		goto fail_0;
1746 	}
1747 
1748 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1749 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1750 	if (r != 0) {
1751 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1752 		       "error %u\n", ifp->if_xname, r);
1753 		goto fail_1;
1754 	}
1755 
1756 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1757 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1758 	if (r != 0) {
1759 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1760 		       "error %u\n", ifp->if_xname, r);
1761 		goto fail_2;
1762 	}
1763 
1764 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1765 	    size, NULL, mapflags | BUS_DMA_NOWAIT);
1766 	if (r != 0) {
1767 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1768 		       "error %u\n", ifp->if_xname, r);
1769 		goto fail_3;
1770 	}
1771 
1772 	dma->dma_size = size;
1773 	return (0);
1774 fail_3:
1775 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1776 fail_2:
1777 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1778 fail_1:
1779 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1780 fail_0:
1781 	dma->dma_map = NULL;
1782 	dma->dma_tag = NULL;
1783 	return (r);
1784 }
1785 
1786 void
1787 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1788 {
1789 	if (dma->dma_tag == NULL)
1790 		return;
1791 
1792 	if (dma->dma_map != NULL) {
1793 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1794 		    dma->dma_map->dm_mapsize,
1795 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1796 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1797 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1798 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1799 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1800 		dma->dma_map = NULL;
1801 	}
1802 }
1803 
1804 
1805 /*********************************************************************
1806  *
1807  *  Allocate memory for the transmit and receive rings, and then
1808  *  the descriptors associated with each; called only once at attach.
1809  *
1810  **********************************************************************/
1811 int
1812 ixgbe_allocate_queues(struct ix_softc *sc)
1813 {
1814 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1815 	struct ix_queue *que;
1816 	struct tx_ring *txr;
1817 	struct rx_ring *rxr;
1818 	int rsize, tsize;
1819 	int txconf = 0, rxconf = 0, i;
1820 
1821 	/* First allocate the top level queue structs */
1822 	if (!(sc->queues = mallocarray(sc->num_queues,
1823 	    sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1824 		printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
1825 		goto fail;
1826 	}
1827 
1828 	/* Then allocate the TX ring struct memory */
1829 	if (!(sc->tx_rings = mallocarray(sc->num_queues,
1830 	    sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1831 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1832 		goto fail;
1833 	}
1834 
1835 	/* Next allocate the RX */
1836 	if (!(sc->rx_rings = mallocarray(sc->num_queues,
1837 	    sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1838 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1839 		goto rx_fail;
1840 	}
1841 
1842 	/* For the ring itself */
1843 	tsize = roundup2(sc->num_tx_desc *
1844 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
1845 
1846 	/*
1847 	 * Now set up the TX queues.  txconf is needed to handle the
1848 	 * possibility that things fail midcourse and we need to
1849 	 * unwind the allocations gracefully.
1850 	 */
1851 	for (i = 0; i < sc->num_queues; i++, txconf++) {
1852 		/* Set up some basics */
1853 		txr = &sc->tx_rings[i];
1854 		txr->sc = sc;
1855 		txr->me = i;
1856 
1857 		if (ixgbe_dma_malloc(sc, tsize,
1858 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1859 			printf("%s: Unable to allocate TX Descriptor memory\n",
1860 			    ifp->if_xname);
1861 			goto err_tx_desc;
1862 		}
1863 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1864 		bzero((void *)txr->tx_base, tsize);
1865 	}
1866 
1867 	/*
1868 	 * Next the RX queues...
1869 	 */
1870 	rsize = roundup2(sc->num_rx_desc *
1871 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1872 	for (i = 0; i < sc->num_queues; i++, rxconf++) {
1873 		rxr = &sc->rx_rings[i];
1874 		/* Set up some basics */
1875 		rxr->sc = sc;
1876 		rxr->me = i;
1877 
1878 		if (ixgbe_dma_malloc(sc, rsize,
1879 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1880 			printf("%s: Unable to allocate RX Descriptor memory\n",
1881 			    ifp->if_xname);
1882 			goto err_rx_desc;
1883 		}
1884 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1885 		bzero((void *)rxr->rx_base, rsize);
1886 	}
1887 
1888 	/*
1889 	 * Finally set up the queue holding structs
1890 	 */
1891 	for (i = 0; i < sc->num_queues; i++) {
1892 		que = &sc->queues[i];
1893 		que->sc = sc;
1894 		que->txr = &sc->tx_rings[i];
1895 		que->rxr = &sc->rx_rings[i];
1896 	}
1897 
1898 	return (0);
1899 
1900 err_rx_desc:
1901 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
1902 		ixgbe_dma_free(sc, &rxr->rxdma);
1903 err_tx_desc:
1904 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
1905 		ixgbe_dma_free(sc, &txr->txdma);
1906 	free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
1907 	sc->rx_rings = NULL;
1908 rx_fail:
1909 	free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
1910 	sc->tx_rings = NULL;
1911 fail:
1912 	return (ENOMEM);
1913 }
1914 
1915 /*********************************************************************
1916  *
1917  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
1918  *  the information needed to transmit a packet on the wire. This is
1919  *  called only once at attach; setup is done on every reset.
1920  *
1921  **********************************************************************/
1922 int
1923 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
1924 {
1925 	struct ix_softc 	*sc = txr->sc;
1926 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1927 	struct ixgbe_tx_buf	*txbuf;
1928 	int			 error, i;
1929 
1930 	if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
1931 	    sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1932 		printf("%s: Unable to allocate tx_buffer memory\n",
1933 		    ifp->if_xname);
1934 		error = ENOMEM;
1935 		goto fail;
1936 	}
1937 	txr->txtag = txr->txdma.dma_tag;
1938 
1939 	/* Create a DMA map for each transmit buffer */
1940 	for (i = 0; i < sc->num_tx_desc; i++) {
1941 		txbuf = &txr->tx_buffers[i];
1942 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
1943 			    sc->num_segs, PAGE_SIZE, 0,
1944 			    BUS_DMA_NOWAIT, &txbuf->map);
1945 
1946 		if (error != 0) {
1947 			printf("%s: Unable to create TX DMA map\n",
1948 			    ifp->if_xname);
1949 			goto fail;
1950 		}
1951 	}
1952 
1953 	return 0;
1954 fail:
1955 	return (error);
1956 }
1957 
1958 /*********************************************************************
1959  *
1960  *  Initialize a transmit ring.
1961  *
1962  **********************************************************************/
1963 int
1964 ixgbe_setup_transmit_ring(struct tx_ring *txr)
1965 {
1966 	struct ix_softc		*sc = txr->sc;
1967 	int			 error;
1968 
1969 	/* Now allocate transmit buffers for the ring */
1970 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
1971 		return (error);
1972 
1973 	/* Clear the old ring contents */
1974 	bzero((void *)txr->tx_base,
1975 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
1976 
1977 	/* Reset indices */
1978 	txr->next_avail_desc = 0;
1979 	txr->next_to_clean = 0;
1980 
1981 	/* Set number of descriptors available */
1982 	txr->tx_avail = sc->num_tx_desc;
1983 
1984 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
1985 	    0, txr->txdma.dma_map->dm_mapsize,
1986 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1987 
1988 	return (0);
1989 }
1990 
1991 /*********************************************************************
1992  *
1993  *  Initialize all transmit rings.
1994  *
1995  **********************************************************************/
1996 int
1997 ixgbe_setup_transmit_structures(struct ix_softc *sc)
1998 {
1999 	struct tx_ring *txr = sc->tx_rings;
2000 	int		i, error;
2001 
2002 	for (i = 0; i < sc->num_queues; i++, txr++) {
2003 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2004 			goto fail;
2005 	}
2006 
2007 	return (0);
2008 fail:
2009 	ixgbe_free_transmit_structures(sc);
2010 	return (error);
2011 }
2012 
2013 /*********************************************************************
2014  *
2015  *  Enable transmit unit.
2016  *
2017  **********************************************************************/
2018 void
2019 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2020 {
2021 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2022 	struct tx_ring	*txr;
2023 	struct ixgbe_hw	*hw = &sc->hw;
2024 	int		 i;
2025 	uint64_t	 tdba;
2026 	uint32_t	 txctrl;
2027 
2028 	/* Setup the Base and Length of the Tx Descriptor Ring */
2029 
2030 	for (i = 0; i < sc->num_queues; i++) {
2031 		txr = &sc->tx_rings[i];
2032 
2033 		/* Setup descriptor base address */
2034 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2035 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2036 		       (tdba & 0x00000000ffffffffULL));
2037 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
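		/* legacy and advanced descriptors share the same 16-byte size */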
2038 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2039 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2040 
2041 		/* Setup the HW Tx Head and Tail descriptor pointers */
2042 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2043 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2044 
2045 		/* Setup Transmit Descriptor Cmd Settings */
2046 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2047 		txr->queue_status = IXGBE_QUEUE_IDLE;
2048 		txr->watchdog_timer = 0;
2049 
2050 		/* Disable Head Writeback */
2051 		switch (hw->mac.type) {
2052 		case ixgbe_mac_82598EB:
2053 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2054 			break;
2055 		case ixgbe_mac_82599EB:
2056 		case ixgbe_mac_X540:
2057 		default:
2058 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2059 			break;
2060 		}
2061 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2062 		switch (hw->mac.type) {
2063 		case ixgbe_mac_82598EB:
2064 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2065 			break;
2066 		case ixgbe_mac_82599EB:
2067 		case ixgbe_mac_X540:
2068 		default:
2069 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2070 			break;
2071 		}
2072 	}
2073 	ifp->if_timer = 0;
2074 
2075 	if (hw->mac.type != ixgbe_mac_82598EB) {
2076 		uint32_t dmatxctl, rttdcs;
2077 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2078 		dmatxctl |= IXGBE_DMATXCTL_TE;
2079 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2080 		/* Disable arbiter to set MTQC */
2081 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2082 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2083 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2084 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2085 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2086 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2087 	}
2088 }
2089 
2090 /*********************************************************************
2091  *
2092  *  Free all transmit rings.
2093  *
2094  **********************************************************************/
2095 void
2096 ixgbe_free_transmit_structures(struct ix_softc *sc)
2097 {
2098 	struct tx_ring *txr = sc->tx_rings;
2099 	int		i;
2100 
2101 	for (i = 0; i < sc->num_queues; i++, txr++)
2102 		ixgbe_free_transmit_buffers(txr);
2103 }
2104 
2105 /*********************************************************************
2106  *
2107  *  Free transmit ring related data structures.
2108  *
2109  **********************************************************************/
2110 void
2111 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2112 {
2113 	struct ix_softc *sc = txr->sc;
2114 	struct ixgbe_tx_buf *tx_buffer;
2115 	int             i;
2116 
2117 	INIT_DEBUGOUT("free_transmit_ring: begin");
2118 
2119 	if (txr->tx_buffers == NULL)
2120 		return;
2121 
2122 	tx_buffer = txr->tx_buffers;
2123 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2124 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2125 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2126 			    0, tx_buffer->map->dm_mapsize,
2127 			    BUS_DMASYNC_POSTWRITE);
2128 			bus_dmamap_unload(txr->txdma.dma_tag,
2129 			    tx_buffer->map);
2130 		}
2131 		if (tx_buffer->m_head != NULL) {
2132 			m_freem(tx_buffer->m_head);
2133 			tx_buffer->m_head = NULL;
2134 		}
2135 		if (tx_buffer->map != NULL) {
2136 			bus_dmamap_destroy(txr->txdma.dma_tag,
2137 			    tx_buffer->map);
2138 			tx_buffer->map = NULL;
2139 		}
2140 	}
2141 
2142 	if (txr->tx_buffers != NULL)
2143 		free(txr->tx_buffers, M_DEVBUF,
2144 		    sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2145 	txr->tx_buffers = NULL;
2146 	txr->txtag = NULL;
2147 }
2148 
2149 /*********************************************************************
2150  *
2151  *  Advanced Context Descriptor setup for VLAN or CSUM
2152  *
2153  **********************************************************************/
2154 
2155 int
2156 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2157     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2158 {
2159 	struct ix_softc *sc = txr->sc;
2160 	struct ixgbe_adv_tx_context_desc *TXD;
2161 	struct ixgbe_tx_buf *tx_buffer;
2162 #if NVLAN > 0
2163 	struct ether_vlan_header *eh;
2164 #else
2165 	struct ether_header *eh;
2166 #endif
2167 	struct ip *ip;
2168 #ifdef notyet
2169 	struct ip6_hdr *ip6;
2170 #endif
2171 	struct mbuf *m;
2172 	int	ipoff;
2173 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2174 	int 	ehdrlen, ip_hlen = 0;
2175 	uint16_t etype;
2176 	uint8_t	ipproto = 0;
2177 	int	offload = TRUE;
2178 	int	ctxd = txr->next_avail_desc;
2179 #if NVLAN > 0
2180 	uint16_t vtag = 0;
2181 #endif
2182 
2183 #if notyet
2184 	/* First check if TSO is to be used */
2185 	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
2186 		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2187 #endif
2188 
2189 	if ((mp->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) == 0)
2190 		offload = FALSE;
2191 
2192 	/* Indicate the whole packet as payload when not doing TSO */
2193 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2194 
2195 	/* Now ready a context descriptor */
2196 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2197 	tx_buffer = &txr->tx_buffers[ctxd];
2198 
2199 	/*
2200 	 * In advanced descriptors the vlan tag must
2201 	 * be placed into the descriptor itself. Hence
2202 	 * we need to make one even if not doing offloads.
2203 	 */
2204 #if NVLAN > 0
2205 	if (mp->m_flags & M_VLANTAG) {
2206 		vtag = mp->m_pkthdr.ether_vtag;
2207 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2208 	} else
2209 #endif
2210 	if (offload == FALSE)
2211 		return (0);	/* No need for CTX */
2212 
2213 	/*
2214 	 * Determine where frame payload starts.
2215 	 * Jump over vlan headers if already present,
2216 	 * helpful for QinQ too.
2217 	 */
2218 	if (mp->m_len < sizeof(struct ether_header))
2219 		return (1);
2220 #if NVLAN > 0
2221 	eh = mtod(mp, struct ether_vlan_header *);
2222 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2223 		if (mp->m_len < sizeof(struct ether_vlan_header))
2224 			return (1);
2225 		etype = ntohs(eh->evl_proto);
2226 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2227 	} else {
2228 		etype = ntohs(eh->evl_encap_proto);
2229 		ehdrlen = ETHER_HDR_LEN;
2230 	}
2231 #else
2232 	eh = mtod(mp, struct ether_header *);
2233 	etype = ntohs(eh->ether_type);
2234 	ehdrlen = ETHER_HDR_LEN;
2235 #endif
2236 
2237 	/* Set the ether header length */
2238 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2239 
2240 	switch (etype) {
2241 	case ETHERTYPE_IP:
2242 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip))
2243 			return (1);
2244 		m = m_getptr(mp, ehdrlen, &ipoff);
2245 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip));
2246 		ip = (struct ip *)(m->m_data + ipoff);
2247 		ip_hlen = ip->ip_hl << 2;
2248 		ipproto = ip->ip_p;
2249 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2250 		break;
2251 #ifdef notyet
2252 	case ETHERTYPE_IPV6:
2253 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip6))
2254 			return (1);
2255 		m = m_getptr(mp, ehdrlen, &ipoff);
2256 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6));
2257 		ip6 = (struct ip6_hdr *)(m->m_data + ipoff);
2258 		ip_hlen = sizeof(*ip6);
2259 		/* XXX-BZ this will go badly in case of ext hdrs. */
2260 		ipproto = ip6->ip6_nxt;
2261 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2262 		break;
2263 #endif
2264 	default:
2265 		offload = FALSE;
2266 		break;
2267 	}
2268 
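	/* Pack the IP header length in below the MAC length and VLAN tag. */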
2269 	vlan_macip_lens |= ip_hlen;
2270 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2271 
2272 	switch (ipproto) {
2273 	case IPPROTO_TCP:
2274 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2275 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2276 		break;
2277 	case IPPROTO_UDP:
2278 		if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2279 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2280 		break;
2281 	default:
2282 		offload = FALSE;
2283 		break;
2284 	}
2285 
2286 	if (offload) /* For the TX descriptor setup */
2287 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2288 
2289 	/* Now copy bits into descriptor */
2290 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2291 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2292 	TXD->seqnum_seed = htole32(0);
2293 	TXD->mss_l4len_idx = htole32(0);
2294 
2295 	tx_buffer->m_head = NULL;
2296 	tx_buffer->eop_index = -1;
2297 
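	/* make the context descriptor visible before the ring indices advance */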
2298 	membar_producer();
2299 
2300 	/* We've consumed the first desc, adjust counters */
2301 	if (++ctxd == sc->num_tx_desc)
2302 		ctxd = 0;
2303 	txr->next_avail_desc = ctxd;
2304 	atomic_dec_int(&txr->tx_avail);
2305 
2306 	return (0);
2307 }
2308 
2309 /**********************************************************************
2310  *
2311  *  Examine each tx_buffer in the used queue. If the hardware is done
2312  *  processing the packet then free associated resources. The
2313  *  tx_buffer is put back on the free queue.
2314  *
2315  **********************************************************************/
2316 int
2317 ixgbe_txeof(struct tx_ring *txr)
2318 {
2319 	struct ix_softc			*sc = txr->sc;
2320 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2321 	uint32_t			 first, last, done, processed;
2322 	uint32_t			 num_avail;
2323 	struct ixgbe_tx_buf		*tx_buffer;
2324 	struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
2325 
2326 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2327 		return FALSE;
2328 
2329 	if (txr->tx_avail == sc->num_tx_desc) {
2330 		txr->queue_status = IXGBE_QUEUE_IDLE;
2331 		return FALSE;
2332 	}
2333 
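	/* order our reads of the ring against the transmit side's stores */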
2334 	membar_consumer();
2335 
2336 	processed = 0;
2337 	first = txr->next_to_clean;
2338 	/* was the tx queue cleaned up in the meantime? */
2339 	if (txr->tx_buffers == NULL)
2340 		return FALSE;
2341 	tx_buffer = &txr->tx_buffers[first];
2342 	/* For cleanup we just use legacy struct */
2343 	tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
2344 	last = tx_buffer->eop_index;
2345 	if (last == -1)
2346 		return FALSE;
2347 	eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2348 
2349 	/*
2350 	 * Get the index of the first descriptor
2351 	 * BEYOND the EOP and call that 'done'.
2352 	 * I do this so the comparison in the
2353 	 * inner while loop below can be simple
2354 	 */
2355 	if (++last == sc->num_tx_desc)
		last = 0;
2356 	done = last;
2357 
2358 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2359 	    0, txr->txdma.dma_map->dm_mapsize,
2360 	    BUS_DMASYNC_POSTREAD);
2361 
2362 	while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
2363 		/* We clean the range of the packet */
2364 		while (first != done) {
2365 			tx_desc->upper.data = 0;
2366 			tx_desc->lower.data = 0;
2367 			tx_desc->buffer_addr = 0;
2368 			++processed;
2369 
2370 			if (tx_buffer->m_head) {
2371 				bus_dmamap_sync(txr->txdma.dma_tag,
2372 				    tx_buffer->map,
2373 				    0, tx_buffer->map->dm_mapsize,
2374 				    BUS_DMASYNC_POSTWRITE);
2375 				bus_dmamap_unload(txr->txdma.dma_tag,
2376 				    tx_buffer->map);
2377 				m_freem(tx_buffer->m_head);
2378 				tx_buffer->m_head = NULL;
2379 			}
2380 			tx_buffer->eop_index = -1;
2381 
2382 			if (++first == sc->num_tx_desc)
2383 				first = 0;
2384 
2385 			tx_buffer = &txr->tx_buffers[first];
2386 			tx_desc = (struct ixgbe_legacy_tx_desc *)
2387 			    &txr->tx_base[first];
2388 		}
2389 		++txr->packets;
2390 		/* See if there is more work now */
2391 		last = tx_buffer->eop_index;
2392 		if (last != -1) {
2393 			eop_desc =
2394 			    (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2395 			/* Get next done point */
2396 			if (++last == sc->num_tx_desc)
				last = 0;
2397 			done = last;
2398 		} else
2399 			break;
2400 	}
2401 
2402 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2403 	    0, txr->txdma.dma_map->dm_mapsize,
2404 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2405 
2406 	txr->next_to_clean = first;
2407 
2408 	num_avail = atomic_add_int_nv(&txr->tx_avail, processed);
2409 
2410 	/* All clean, turn off the timer */
2411 	if (num_avail == sc->num_tx_desc)
2412 		ifp->if_timer = 0;
2413 
2414 	if (ifq_is_oactive(&ifp->if_snd))
2415 		ifq_restart(&ifp->if_snd);
2416 
2417 	return TRUE;
2418 }
2419 
2420 /*********************************************************************
2421  *
2422  *  Get a buffer from system mbuf buffer pool.
2423  *
2424  **********************************************************************/
2425 int
2426 ixgbe_get_buf(struct rx_ring *rxr, int i)
2427 {
2428 	struct ix_softc		*sc = rxr->sc;
2429 	struct ixgbe_rx_buf	*rxbuf;
2430 	struct mbuf		*mp;
2431 	int			error;
2432 	union ixgbe_adv_rx_desc	*rxdesc;
2433 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2434 
2435 	rxbuf = &rxr->rx_buffers[i];
2436 	rxdesc = &rxr->rx_base[i];
2437 	if (rxbuf->buf) {
2438 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2439 		    sc->dev.dv_xname, i);
2440 		return (ENOBUFS);
2441 	}
2442 
2443 	/* needed in any case so preallocate since this one will fail for sure */
2444 	mp = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rx_mbuf_sz);
2445 	if (!mp)
2446 		return (ENOBUFS);
2447 
2448 	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
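	/* ETHER_ALIGN keeps the IP header 32-bit aligned behind the ethernet header */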
2449 	m_adj(mp, ETHER_ALIGN);
2450 
2451 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2452 	    mp, BUS_DMA_NOWAIT);
2453 	if (error) {
2454 		m_freem(mp);
2455 		return (error);
2456 	}
2457 
2458 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2459 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2460 	rxbuf->buf = mp;
2461 
2462 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2463 	    dsize * i, dsize, BUS_DMASYNC_POSTWRITE);
2464 
2465 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2466 
2467 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2468 	    dsize * i, dsize, BUS_DMASYNC_PREWRITE);
2469 
2470 	return (0);
2471 }
2472 
2473 /*********************************************************************
2474  *
2475  *  Allocate memory for rx_buffer structures. Since we use one
2476  *  rx_buffer per received packet, the maximum number of rx_buffers
2477  *  that we'll need is equal to the number of receive descriptors
2478  *  that we've allocated.
2479  *
2480  **********************************************************************/
2481 int
2482 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2483 {
2484 	struct ix_softc		*sc = rxr->sc;
2485 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2486 	struct ixgbe_rx_buf 	*rxbuf;
2487 	int			i, error;
2488 
2489 	if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2490 	    sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2491 		printf("%s: Unable to allocate rx_buffer memory\n",
2492 		    ifp->if_xname);
2493 		error = ENOMEM;
2494 		goto fail;
2495 	}
2496 
2497 	rxbuf = rxr->rx_buffers;
2498 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2499 		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2500 		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2501 		if (error) {
2502 			printf("%s: Unable to create Packet DMA map\n",
2503 			    ifp->if_xname);
2504 			goto fail;
2505 		}
2506 	}
2507 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2508 	    rxr->rxdma.dma_map->dm_mapsize,
2509 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2510 
2511 	return (0);
2512 
2513 fail:
2514 	return (error);
2515 }
2516 
2517 /*********************************************************************
2518  *
2519  *  Initialize a receive ring and its buffers.
2520  *
2521  **********************************************************************/
2522 int
2523 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2524 {
2525 	struct ix_softc		*sc = rxr->sc;
2526 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2527 	int			 rsize, error;
2528 
2529 	rsize = roundup2(sc->num_rx_desc *
2530 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2531 	/* Clear the ring contents */
2532 	bzero((void *)rxr->rx_base, rsize);
2533 
2534 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2535 		return (error);
2536 
2537 	/* Setup our descriptor indices */
2538 	rxr->next_to_check = 0;
2539 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2540 
2541 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2542 	    sc->num_rx_desc);
2543 
2544 	ixgbe_rxfill(rxr);
2545 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2546 		printf("%s: unable to fill any rx descriptors\n",
2547 		    sc->dev.dv_xname);
2548 		return (ENOBUFS);
2549 	}
2550 
2551 	return (0);
2552 }
2553 
2554 int
2555 ixgbe_rxfill(struct rx_ring *rxr)
2556 {
2557 	struct ix_softc *sc = rxr->sc;
2558 	int		 post = 0;
2559 	u_int		 slots;
2560 	int		 i;
2561 
2562 	i = rxr->last_desc_filled;
2563 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2564 	    slots > 0; slots--) {
2565 		if (++i == sc->num_rx_desc)
2566 			i = 0;
2567 
2568 		if (ixgbe_get_buf(rxr, i) != 0)
2569 			break;
2570 
2571 		rxr->last_desc_filled = i;
2572 		post = 1;
2573 	}
2574 
2575 	if_rxr_put(&rxr->rx_ring, slots);
2576 
2577 	return (post);
2578 }
2579 
2580 void
2581 ixgbe_rxrefill(void *xsc)
2582 {
2583 	struct ix_softc *sc = xsc;
2584 	struct ix_queue *que = sc->queues;
2585 	int s;
2586 
2587 	s = splnet();
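	/* post any descriptors we can fill now; if none, retry on the next tick */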
2588 	if (ixgbe_rxfill(que->rxr)) {
2589 		/* Advance the Rx Queue "Tail Pointer" */
2590 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
2591 		    que->rxr->last_desc_filled);
2592 	} else
2593 		timeout_add(&sc->rx_refill, 1);
2594 	splx(s);
2595 }
2596 
2597 /*********************************************************************
2598  *
2599  *  Initialize all receive rings.
2600  *
2601  **********************************************************************/
2602 int
2603 ixgbe_setup_receive_structures(struct ix_softc *sc)
2604 {
2605 	struct rx_ring *rxr = sc->rx_rings;
2606 	int i;
2607 
2608 	for (i = 0; i < sc->num_queues; i++, rxr++)
2609 		if (ixgbe_setup_receive_ring(rxr))
2610 			goto fail;
2611 
2612 	return (0);
2613 fail:
2614 	ixgbe_free_receive_structures(sc);
2615 	return (ENOBUFS);
2616 }
2617 
2618 /*********************************************************************
2619  *
2620  *  Setup receive registers and features.
2621  *
2622  **********************************************************************/
2623 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2624 
2625 void
2626 ixgbe_initialize_receive_units(struct ix_softc *sc)
2627 {
2628 	struct rx_ring	*rxr = sc->rx_rings;
2629 	struct ixgbe_hw	*hw = &sc->hw;
2630 	uint32_t	bufsz, fctrl, srrctl, rxcsum;
2631 	uint32_t	hlreg;
2632 	int		i;
2633 
2634 	/*
2635 	 * Make sure receives are disabled while
2636 	 * setting up the descriptor ring
2637 	 */
2638 	ixgbe_disable_rx(hw);
2639 
2640 	/* Enable broadcasts */
2641 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2642 	fctrl |= IXGBE_FCTRL_BAM;
2643 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2644 		fctrl |= IXGBE_FCTRL_DPF;
2645 		fctrl |= IXGBE_FCTRL_PMCF;
2646 	}
2647 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2648 
2649 	/* Always enable jumbo frame reception */
2650 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2651 	hlreg |= IXGBE_HLREG0_JUMBOEN;
2652 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2653 
2654 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2655 
2656 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2657 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2658 
2659 		/* Setup the Base and Length of the Rx Descriptor Ring */
2660 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2661 			       (rdba & 0x00000000ffffffffULL));
2662 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2663 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2664 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2665 
2666 		/* Set up the SRRCTL register */
2667 		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2668 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2669 
2670 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2671 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2672 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2673 	}
2674 
2675 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2676 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2677 			      IXGBE_PSRTYPE_UDPHDR |
2678 			      IXGBE_PSRTYPE_IPV4HDR |
2679 			      IXGBE_PSRTYPE_IPV6HDR;
2680 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2681 	}
2682 
2683 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2684 	rxcsum &= ~IXGBE_RXCSUM_PCSD;
2685 
2686 	/* Setup RSS */
2687 	if (sc->num_queues > 1) {
2688 		ixgbe_initialize_rss_mapping(sc);
2689 
2690 		/* RSS and RX IPP Checksum are mutually exclusive */
2691 		rxcsum |= IXGBE_RXCSUM_PCSD;
2692 	}
2693 
2694 	/* This is useful for calculating UDP/IP fragment checksums */
2695 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2696 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2697 
2698 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2699 }
2700 
2701 void
2702 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2703 {
2704 	struct ixgbe_hw	*hw = &sc->hw;
2705 	uint32_t reta = 0, mrqc, rss_key[10];
2706 	int i, j, queue_id, table_size, index_mult;
2707 
2708 	/* set up random bits */
2709 	arc4random_buf(&rss_key, sizeof(rss_key));
2710 
2711 	/* Set multiplier for RETA setup and table size based on MAC */
2712 	index_mult = 0x1;
2713 	table_size = 128;
2714 	switch (sc->hw.mac.type) {
2715 	case ixgbe_mac_82598EB:
2716 		index_mult = 0x11;
2717 		break;
2718 	case ixgbe_mac_X550:
2719 	case ixgbe_mac_X550EM_x:
2720 		table_size = 512;
2721 		break;
2722 	default:
2723 		break;
2724 	}
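	/* The X550 has a 512-entry table; entries past 128 are written via ERETA. */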
2725 
2726 	/* Set up the redirection table */
2727 	for (i = 0, j = 0; i < table_size; i++, j++) {
2728 		if (j == sc->num_queues)
			j = 0;
2729 		queue_id = (j * index_mult);
2730 		/*
2731 		 * The low 8 bits are for hash value (n+0);
2732 		 * The next 8 bits are for hash value (n+1), etc.
2733 		 */
2734 		reta = reta >> 8;
2735 		reta = reta | ( ((uint32_t) queue_id) << 24);
2736 		if ((i & 3) == 3) {
2737 			if (i < 128)
2738 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2739 			else
2740 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2741 				    reta);
2742 			reta = 0;
2743 		}
2744 	}
2745 
2746 	/* Now fill our hash function seeds */
2747 	for (i = 0; i < 10; i++)
2748 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2749 
2750 	/*
2751 	 * Disable UDP - IP fragments aren't currently being handled
2752 	 * and so we end up with a mix of 2-tuple and 4-tuple
2753 	 * traffic.
2754 	 */
2755 	mrqc = IXGBE_MRQC_RSSEN
2756 	     | IXGBE_MRQC_RSS_FIELD_IPV4
2757 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2758 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2759 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2760 	     | IXGBE_MRQC_RSS_FIELD_IPV6
2761 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2762 	;
2763 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2764 }
2765 
2766 /*********************************************************************
2767  *
2768  *  Free all receive rings.
2769  *
2770  **********************************************************************/
2771 void
2772 ixgbe_free_receive_structures(struct ix_softc *sc)
2773 {
2774 	struct rx_ring *rxr;
2775 	int		i;
2776 
2777 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2778 		if_rxr_init(&rxr->rx_ring, 0, 0);
2779 
2780 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2781 		ixgbe_free_receive_buffers(rxr);
2782 }
2783 
2784 /*********************************************************************
2785  *
2786  *  Free receive ring data structures
2787  *
2788  **********************************************************************/
2789 void
2790 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2791 {
2792 	struct ix_softc		*sc;
2793 	struct ixgbe_rx_buf	*rxbuf;
2794 	int			 i;
2795 
2796 	sc = rxr->sc;
2797 	if (rxr->rx_buffers != NULL) {
2798 		for (i = 0; i < sc->num_rx_desc; i++) {
2799 			rxbuf = &rxr->rx_buffers[i];
2800 			if (rxbuf->buf != NULL) {
2801 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2802 				    0, rxbuf->map->dm_mapsize,
2803 				    BUS_DMASYNC_POSTREAD);
2804 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2805 				    rxbuf->map);
2806 				m_freem(rxbuf->buf);
2807 				rxbuf->buf = NULL;
2808 			}
2809 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2810 			rxbuf->map = NULL;
2811 		}
2812 		free(rxr->rx_buffers, M_DEVBUF,
2813 		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
2814 		rxr->rx_buffers = NULL;
2815 	}
2816 }
2817 
2818 /*********************************************************************
2819  *
2820  *  This routine executes in interrupt context.  It replenishes
2821  *  the mbufs in the descriptor ring and passes data which has been
2822  *  DMA'ed into host memory up to the network stack.
2823  *
2824  *********************************************************************/
2825 int
2826 ixgbe_rxeof(struct ix_queue *que)
2827 {
2828 	struct ix_softc 	*sc = que->sc;
2829 	struct rx_ring		*rxr = que->rxr;
2830 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2831 	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
2832 	struct mbuf    		*mp, *sendmp;
2833 	uint8_t		    	 eop = 0;
2834 	uint16_t		 len, vtag;
2835 	uint32_t		 staterr = 0, ptype;
2836 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
2837 	union ixgbe_adv_rx_desc	*rxdesc;
2838 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2839 	int			 i, nextp;
2840 
2841 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2842 		return FALSE;
2843 
2844 	i = rxr->next_to_check;
2845 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
2846 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2847 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
2848 
2849 		rxdesc = &rxr->rx_base[i];
2850 		staterr = letoh32(rxdesc->wb.upper.status_error);
2851 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2852 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2853 			    dsize * i, dsize,
2854 			    BUS_DMASYNC_PREREAD);
2855 			break;
2856 		}
2857 
2858 		/* Zero out the receive descriptor's status */
2859 		rxdesc->wb.upper.status_error = 0;
2860 		rxbuf = &rxr->rx_buffers[i];
2861 
2862 		/* pull the mbuf off the ring */
2863 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2864 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2865 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2866 
2867 		mp = rxbuf->buf;
2868 		len = letoh16(rxdesc->wb.upper.length);
2869 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
2870 		    IXGBE_RXDADV_PKTTYPE_MASK;
2871 		vtag = letoh16(rxdesc->wb.upper.vlan);
2872 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
2873 
2874 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
2875 			sc->dropped_pkts++;
2876 
2877 			if (rxbuf->fmp) {
2878 				m_freem(rxbuf->fmp);
2879 				rxbuf->fmp = NULL;
2880 			}
2881 
2882 			m_freem(mp);
2883 			rxbuf->buf = NULL;
2884 			goto next_desc;
2885 		}
2886 
2887 		if (mp == NULL) {
2888 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2889 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
2890 			    i, if_rxr_inuse(&rxr->rx_ring),
2891 			    rxr->last_desc_filled);
2892 		}
2893 
2894 		/* Currently no HW RSC support on the 82599 */
2895 		if (!eop) {
2896 			/*
2897 			 * Figure out the next descriptor of this frame.
2898 			 */
2899 			nextp = i + 1;
2900 			if (nextp == sc->num_rx_desc)
2901 				nextp = 0;
2902 			nxbuf = &rxr->rx_buffers[nextp];
2903 			/* prefetch(nxbuf); */
2904 		}
2905 
2906 		/*
2907 		 * Rather than using the fmp/lmp global pointers
2908 		 * we now keep the head of a packet chain in the
2909 		 * buffer struct and pass this along from one
2910 		 * descriptor to the next, until we get EOP.
2911 		 */
2912 		mp->m_len = len;
2913 		/*
2914 		 * See if there is a stored head
2915 		 * that determines what we are
2916 		 */
2917 		sendmp = rxbuf->fmp;
2918 		rxbuf->buf = rxbuf->fmp = NULL;
2919 
2920 		if (sendmp != NULL) /* secondary frag */
2921 			sendmp->m_pkthdr.len += mp->m_len;
2922 		else {
2923 			/* first desc of a non-ps chain */
2924 			sendmp = mp;
2925 			sendmp->m_pkthdr.len = mp->m_len;
2926 #if NVLAN > 0
2927 			if (staterr & IXGBE_RXD_STAT_VP) {
2928 				sendmp->m_pkthdr.ether_vtag = vtag;
2929 				sendmp->m_flags |= M_VLANTAG;
2930 			}
2931 #endif
2932 		}
2933 
2934 		/* Pass the head pointer on */
2935 		if (eop == 0) {
2936 			nxbuf->fmp = sendmp;
2937 			sendmp = NULL;
2938 			mp->m_next = nxbuf->buf;
2939 		} else { /* Sending this frame? */
2940 			rxr->rx_packets++;
2941 			/* capture data for AIM */
2942 			rxr->bytes += sendmp->m_pkthdr.len;
2943 			rxr->rx_bytes += sendmp->m_pkthdr.len;
2944 
2945 			ixgbe_rx_checksum(staterr, sendmp, ptype);
2946 
2947 			ml_enqueue(&ml, sendmp);
2948 		}
2949 next_desc:
2950 		if_rxr_put(&rxr->rx_ring, 1);
2951 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2952 		    dsize * i, dsize,
2953 		    BUS_DMASYNC_PREREAD);
2954 
2955 		/* Advance our pointers to the next descriptor. */
2956 		if (++i == sc->num_rx_desc)
2957 			i = 0;
2958 	}
2959 	rxr->next_to_check = i;
2960 
2961 	if_input(ifp, &ml);
2962 
2963 	if (!(staterr & IXGBE_RXD_STAT_DD))
2964 		return FALSE;
2965 
2966 	return TRUE;
2967 }
2968 
2969 /*********************************************************************
2970  *
2971  *  Verify that the hardware indicated that the checksum is valid.
2972  *  Inform the stack about the status of the checksum so that the
2973  *  stack doesn't spend time verifying it.
2974  *
2975  *********************************************************************/
2976 void
2977 ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
2978 {
2979 	uint16_t status = (uint16_t) staterr;
2980 	uint8_t  errors = (uint8_t) (staterr >> 24);
2981 
2982 	if (status & IXGBE_RXD_STAT_IPCS) {
2983 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
2984 			/* IP Checksum Good */
2985 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
2986 		} else
2987 			mp->m_pkthdr.csum_flags = 0;
2988 	}
2989 	if (status & IXGBE_RXD_STAT_L4CS) {
2990 		if (!(errors & IXGBE_RXD_ERR_TCPE))
2991 			mp->m_pkthdr.csum_flags |=
2992 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
2993 	}
2994 }
2995 
2996 void
2997 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
2998 {
2999 	uint32_t	ctrl;
3000 	int		i;
3001 
3002 	/*
3003 	 * A soft reset zeroes out the VFTA, so
3004 	 * we need to repopulate it now.
3005 	 */
3006 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3007 		if (sc->shadow_vfta[i] != 0)
3008 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3009 			    sc->shadow_vfta[i]);
3010 	}
3011 
3012 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3013 #if 0
3014 	/* Enable the Filter Table if enabled */
3015 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3016 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3017 		ctrl |= IXGBE_VLNCTRL_VFE;
3018 	}
3019 #endif
3020 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3021 		ctrl |= IXGBE_VLNCTRL_VME;
3022 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3023 
3024 	/* On 82599 the VLAN enable is per/queue in RXDCTL */
3025 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3026 		for (i = 0; i < sc->num_queues; i++) {
3027 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3028 			ctrl |= IXGBE_RXDCTL_VME;
3029 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3030 		}
3031 	}
3032 }
3033 
3034 void
3035 ixgbe_enable_intr(struct ix_softc *sc)
3036 {
3037 	struct ixgbe_hw *hw = &sc->hw;
3038 	struct ix_queue *que = sc->queues;
3039 	uint32_t	mask, fwsm;
3040 	int i;
3041 
3042 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3043 	/* Enable Fan Failure detection */
3044 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3045 		mask |= IXGBE_EIMS_GPI_SDP1;
3046 
3047 	switch (sc->hw.mac.type) {
3048 	case ixgbe_mac_82599EB:
3049 		mask |= IXGBE_EIMS_ECC;
3050 		/* Temperature sensor on some adapters */
3051 		mask |= IXGBE_EIMS_GPI_SDP0;
3052 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3053 		mask |= IXGBE_EIMS_GPI_SDP1;
3054 		mask |= IXGBE_EIMS_GPI_SDP2;
3055 		break;
3056 	case ixgbe_mac_X540:
3057 		mask |= IXGBE_EIMS_ECC;
3058 		/* Detect if Thermal Sensor is enabled */
3059 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3060 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3061 			mask |= IXGBE_EIMS_TS;
3062 		break;
3063 	case ixgbe_mac_X550:
3064 	case ixgbe_mac_X550EM_x:
3065 		mask |= IXGBE_EIMS_ECC;
3066 		/* MAC thermal sensor is automatically enabled */
3067 		mask |= IXGBE_EIMS_TS;
3068 		/* Some devices use SDP0 for important information */
3069 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3070 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3071 			mask |= IXGBE_EIMS_GPI_SDP0_X540;
		break;
3072 	default:
3073 		break;
3074 	}
3075 
3076 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3077 
3078 	/* With MSI-X we use auto clear */
3079 	if (sc->msix > 1) {
3080 		mask = IXGBE_EIMS_ENABLE_MASK;
3081 		/* Don't autoclear Link */
3082 		mask &= ~IXGBE_EIMS_OTHER;
3083 		mask &= ~IXGBE_EIMS_LSC;
3084 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3085 	}
3086 
3087 	/*
3088 	 * Now enable all queues; this is done separately to
3089 	 * allow for handling the extended (beyond 32) MSI-X
3090 	 * vectors that can be used by the 82599.
3091 	 */
3092 	for (i = 0; i < sc->num_queues; i++, que++)
3093 		ixgbe_enable_queue(sc, que->msix);
3094 
3095 	IXGBE_WRITE_FLUSH(hw);
3096 }
3097 
3098 void
3099 ixgbe_disable_intr(struct ix_softc *sc)
3100 {
3101 	if (sc->msix > 1)
3102 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3103 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3104 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3105 	} else {
3106 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3107 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3108 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3109 	}
3110 	IXGBE_WRITE_FLUSH(&sc->hw);
3111 }
3112 
3113 uint16_t
3114 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3115 {
3116 	struct pci_attach_args	*pa;
3117 	uint32_t value;
3118 	int high = 0;
3119 
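	/* config space is read as 32-bit words; pick out the requested 16-bit half */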
3120 	if (reg & 0x2) {
3121 		high = 1;
3122 		reg &= ~0x2;
3123 	}
3124 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3125 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3126 
3127 	if (high)
3128 		value >>= 16;
3129 
3130 	return (value & 0xffff);
3131 }
3132 
3133 void
3134 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3135 {
3136 	struct pci_attach_args	*pa;
3137 	uint32_t rv;
3138 	int high = 0;
3139 
3140 	/* Need to do read/mask/write... because 16 vs 32 bit!!! */
3141 	if (reg & 0x2) {
3142 		high = 1;
3143 		reg &= ~0x2;
3144 	}
3145 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3146 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3147 	if (!high)
3148 		rv = (rv & 0xffff0000) | value;
3149 	else
3150 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3151 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3152 }
3153 
3154 /*
3155  * Setup the correct IVAR register for a particular MSIX interrupt
3156  *   (yes this is all very magic and confusing :)
3157  *  - entry is the register array entry
3158  *  - vector is the MSIX vector for this queue
3159  *  - type is RX/TX/MISC
3160  */
3161 void
3162 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3163 {
3164 	struct ixgbe_hw *hw = &sc->hw;
3165 	uint32_t ivar, index;
3166 
3167 	vector |= IXGBE_IVAR_ALLOC_VAL;
3168 
3169 	switch (hw->mac.type) {
3170 
3171 	case ixgbe_mac_82598EB:
3172 		if (type == -1)
3173 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3174 		else
3175 			entry += (type * 64);
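		/* each IVAR register holds four 8-bit vector entries */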
3176 		index = (entry >> 2) & 0x1F;
3177 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3178 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3179 		ivar |= (vector << (8 * (entry & 0x3)));
3180 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3181 		break;
3182 
3183 	case ixgbe_mac_82599EB:
3184 	case ixgbe_mac_X540:
3185 	case ixgbe_mac_X550:
3186 	case ixgbe_mac_X550EM_x:
3187 		if (type == -1) { /* MISC IVAR */
3188 			index = (entry & 1) * 8;
3189 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3190 			ivar &= ~(0xFF << index);
3191 			ivar |= (vector << index);
3192 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3193 		} else {	/* RX/TX IVARS */
3194 			index = (16 * (entry & 1)) + (8 * type);
3195 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3196 			ivar &= ~(0xFF << index);
3197 			ivar |= (vector << index);
3198 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3199 		}
		break;
3200 
3201 	default:
3202 		break;
3203 	}
3204 }
3205 
3206 void
3207 ixgbe_configure_ivars(struct ix_softc *sc)
3208 {
3209 #if notyet
3210 	struct ix_queue *que = sc->queues;
3211 	uint32_t newitr;
3212 	int i;
3213 
3214 	if (ixgbe_max_interrupt_rate > 0)
3215 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3216 	else
3217 		newitr = 0;
3218 
3219 	for (i = 0; i < sc->num_queues; i++, que++) {
3220 		/* First the RX queue entry */
3221 		ixgbe_set_ivar(sc, i, que->msix, 0);
3222 		/* ... and the TX */
3223 		ixgbe_set_ivar(sc, i, que->msix, 1);
3224 		/* Set an Initial EITR value */
3225 		IXGBE_WRITE_REG(&sc->hw,
3226 		    IXGBE_EITR(que->msix), newitr);
3227 	}
3228 
3229 	/* For the Link interrupt */
3230 	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3231 #endif
3232 }
3233 
3234 /*
3235  * SFP module interrupts handler
3236  */
3237 void
3238 ixgbe_handle_mod(struct ix_softc *sc)
3239 {
3240 	struct ixgbe_hw *hw = &sc->hw;
3241 	uint32_t err;
3242 
3243 	err = hw->phy.ops.identify_sfp(hw);
3244 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3245 		printf("%s: Unsupported SFP+ module type was detected!\n",
3246 		    sc->dev.dv_xname);
3247 		return;
3248 	}
3249 	err = hw->mac.ops.setup_sfp(hw);
3250 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3251 		printf("%s: Setup failure - unsupported SFP+ module type!\n",
3252 		    sc->dev.dv_xname);
3253 		return;
3254 	}
3255 	/* Set the optics type so system reports correctly */
3256 	ixgbe_setup_optics(sc);
3257 
3258 	ixgbe_handle_msf(sc);
3259 }
3260 
3261 
3262 /*
3263  * MSF (multispeed fiber) interrupts handler
3264  */
3265 void
3266 ixgbe_handle_msf(struct ix_softc *sc)
3267 {
3268 	struct ixgbe_hw *hw = &sc->hw;
3269 	uint32_t autoneg;
3270 	bool negotiate;
3271 
3272 	autoneg = hw->phy.autoneg_advertised;
3273 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3274 		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3275 			return;
3276 	}
3277 	if (hw->mac.ops.setup_link)
3278 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3279 
3280 	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3281 	ixgbe_add_media_types(sc);
3282 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3283 }
3284 
3285 /*
3286  * External PHY interrupts handler
3287  */
3288 void
3289 ixgbe_handle_phy(struct ix_softc *sc)
3290 {
3291 	struct ixgbe_hw *hw = &sc->hw;
3292 	int error;
3293 
3294 	error = hw->phy.ops.handle_lasi(hw);
3295 	if (error == IXGBE_ERR_OVERTEMP)
3296 		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3297 		    "PHY will downshift to lower power state!\n",
3298 		    sc->dev.dv_xname);
3299 	else if (error)
3300 		printf("%s: Error handling LASI interrupt: %d\n",
3301 		    sc->dev.dv_xname, error);
3302 
3303 }
3304 
3305 /**********************************************************************
3306  *
3307  *  Update the board statistics counters.
3308  *
3309  **********************************************************************/
3310 void
3311 ixgbe_update_stats_counters(struct ix_softc *sc)
3312 {
3313 	struct ifnet	*ifp = &sc->arpcom.ac_if;
3314 	struct ixgbe_hw	*hw = &sc->hw;
3315 	uint64_t	total_missed_rx = 0;
3316 #ifdef IX_DEBUG
3317 	uint32_t	missed_rx = 0, bprc, lxon, lxoff, total;
3318 	int		i;
3319 #endif
3320 
3321 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3322 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3323 
3324 #ifdef IX_DEBUG
3325 	for (i = 0; i < 8; i++) {
3326 		uint32_t mp;
3327 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3328 		/* missed_rx tallies misses for the gprc workaround */
3329 		missed_rx += mp;
3330 		/* global total per queue */
3331 		sc->stats.mpc[i] += mp;
3332 		/* running comprehensive total for stats display */
3333 		total_missed_rx += sc->stats.mpc[i];
3334 		if (hw->mac.type == ixgbe_mac_82598EB)
3335 			sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3336 	}
3337 
3338 	/* Hardware workaround, gprc counts missed packets */
3339 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3340 	sc->stats.gprc -= missed_rx;
3341 
3342 	if (hw->mac.type != ixgbe_mac_82598EB) {
3343 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3344 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3345 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3346 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3347 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3348 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3349 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3350 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3351 	} else {
3352 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3353 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3354 		/* 82598 only has a counter in the high register */
3355 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3356 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3357 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3358 	}
3359 
3360 	/*
3361 	 * Workaround: mprc hardware is incorrectly counting
3362 	 * broadcasts, so for now we subtract those.
3363 	 */
3364 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3365 	sc->stats.bprc += bprc;
3366 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3367 	if (hw->mac.type == ixgbe_mac_82598EB)
3368 		sc->stats.mprc -= bprc;
3369 
3370 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3371 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3372 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3373 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3374 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3375 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3376 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3377 
3378 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3379 	sc->stats.lxontxc += lxon;
3380 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3381 	sc->stats.lxofftxc += lxoff;
3382 	total = lxon + lxoff;
3383 
3384 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3385 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3386 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3387 	sc->stats.gptc -= total;
3388 	sc->stats.mptc -= total;
3389 	sc->stats.ptc64 -= total;
3390 	sc->stats.gotc -= total * ETHER_MIN_LEN;
3391 
3392 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3393 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3394 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3395 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3396 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3397 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3398 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3399 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3400 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3401 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3402 #endif
3403 
3404 	/* Fill out the OS statistics structure */
3405 	ifp->if_collisions = 0;
3406 	ifp->if_oerrors = sc->watchdog_events;
3407 	ifp->if_ierrors = total_missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3408 }
3409 
3410 #ifdef IX_DEBUG
3411 /**********************************************************************
3412  *
3413  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3414  *  This routine provides a way to take a look at important statistics
3415  *  maintained by the driver and hardware.
3416  *
3417  **********************************************************************/
3418 void
3419 ixgbe_print_hw_stats(struct ix_softc * sc)
3420 {
3421 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3422 
3423 	printf("%s: missed pkts %llu, rx len errs %llu, crc errs %llu, "
3424 	    "dropped pkts %lu, watchdog timeouts %ld, "
3425 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3426 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3427 	    "tso tx %lu\n",
3428 	    ifp->if_xname,
3429 	    (long long)sc->stats.mpc[0],
3430 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3431 	    (long long)sc->stats.crcerrs,
3432 	    sc->dropped_pkts,
3433 	    sc->watchdog_events,
3434 	    (long long)sc->stats.lxonrxc,
3435 	    (long long)sc->stats.lxontxc,
3436 	    (long long)sc->stats.lxoffrxc,
3437 	    (long long)sc->stats.lxofftxc,
3438 	    (long long)sc->stats.tpr,
3439 	    (long long)sc->stats.gprc,
3440 	    (long long)sc->stats.gptc,
3441 	    sc->tso_tx);
3442 }
3443 #endif
3444