1 /*	$OpenBSD: if_ix.c,v 1.161 2020/03/02 01:59:01 jmatthew Exp $	*/
2 
3 /******************************************************************************
4 
5   Copyright (c) 2001-2013, Intel Corporation
6   All rights reserved.
7 
8   Redistribution and use in source and binary forms, with or without
9   modification, are permitted provided that the following conditions are met:
10 
11    1. Redistributions of source code must retain the above copyright notice,
12       this list of conditions and the following disclaimer.
13 
14    2. Redistributions in binary form must reproduce the above copyright
15       notice, this list of conditions and the following disclaimer in the
16       documentation and/or other materials provided with the distribution.
17 
18    3. Neither the name of the Intel Corporation nor the names of its
19       contributors may be used to endorse or promote products derived from
20       this software without specific prior written permission.
21 
22   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32   POSSIBILITY OF SUCH DAMAGE.
33 
34 ******************************************************************************/
35 /* FreeBSD: src/sys/dev/ixgbe/ixgbe.c 251964 Jun 18 21:28:19 2013 UTC */
36 
37 #include <dev/pci/if_ix.h>
38 #include <dev/pci/ixgbe_type.h>
39 
40 /*********************************************************************
41  *  Driver version
42  *********************************************************************/
43 /* char ixgbe_driver_version[] = "2.5.13"; */
44 
45 /*********************************************************************
46  *  PCI Device ID Table
47  *
48  *  Used by ixgbe_probe() to select the devices this driver attaches to
49  *********************************************************************/
50 
51 const struct pci_matchid ixgbe_devices[] = {
52 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598 },
53 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_BX },
54 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF_DUAL },
55 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AF },
56 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT },
57 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT2 },
58 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598AT_DUAL },
59 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4 },
60 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_CX4_DUAL },
61 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_XF_LR },
62 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598EB_SFP },
63 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_SR_DUAL_EM },
64 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82598_DA_DUAL },
65 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4 },
66 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_KX4_MEZZ },
67 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_XAUI },
68 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_COMBO_BP },
69 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_BPLANE_FCOE },
70 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_CX4 },
71 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_T3_LOM },
72 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP },
73 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_EM },
74 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF_QP },
75 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_SF2 },
76 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_SFP_FCOE },
77 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599EN_SFP },
78 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599_QSFP_SF_QP },
79 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T },
80 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540T1 },
81 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T },
82 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550T1 },
83 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KX4 },
84 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_KR },
85 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_SFP },
86 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_10G_T },
87 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_1G_T },
88 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR },
89 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_KR_L },
90 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP_N },
91 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SFP },
92 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII },
93 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_SGMII_L },
94 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_10G_T },
95 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T },
96 	{ PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_1G_T_L }
97 };
98 
99 /*********************************************************************
100  *  Function prototypes
101  *********************************************************************/
102 int	ixgbe_probe(struct device *, void *, void *);
103 void	ixgbe_attach(struct device *, struct device *, void *);
104 int	ixgbe_detach(struct device *, int);
105 void	ixgbe_start(struct ifqueue *);
106 int	ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
107 int	ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
108 int	ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
109 void	ixgbe_watchdog(struct ifnet *);
110 void	ixgbe_init(void *);
111 void	ixgbe_stop(void *);
112 void	ixgbe_media_status(struct ifnet *, struct ifmediareq *);
113 int	ixgbe_media_change(struct ifnet *);
114 void	ixgbe_identify_hardware(struct ix_softc *);
115 int	ixgbe_allocate_pci_resources(struct ix_softc *);
116 int	ixgbe_allocate_legacy(struct ix_softc *);
117 int	ixgbe_allocate_queues(struct ix_softc *);
118 void	ixgbe_free_pci_resources(struct ix_softc *);
119 void	ixgbe_local_timer(void *);
120 void	ixgbe_setup_interface(struct ix_softc *);
121 void	ixgbe_config_gpie(struct ix_softc *);
122 void	ixgbe_config_delay_values(struct ix_softc *);
123 void	ixgbe_add_media_types(struct ix_softc *);
124 void	ixgbe_config_link(struct ix_softc *);
125 
126 int	ixgbe_allocate_transmit_buffers(struct tx_ring *);
127 int	ixgbe_setup_transmit_structures(struct ix_softc *);
128 int	ixgbe_setup_transmit_ring(struct tx_ring *);
129 void	ixgbe_initialize_transmit_units(struct ix_softc *);
130 void	ixgbe_free_transmit_structures(struct ix_softc *);
131 void	ixgbe_free_transmit_buffers(struct tx_ring *);
132 
133 int	ixgbe_allocate_receive_buffers(struct rx_ring *);
134 int	ixgbe_setup_receive_structures(struct ix_softc *);
135 int	ixgbe_setup_receive_ring(struct rx_ring *);
136 void	ixgbe_initialize_receive_units(struct ix_softc *);
137 void	ixgbe_free_receive_structures(struct ix_softc *);
138 void	ixgbe_free_receive_buffers(struct rx_ring *);
139 void	ixgbe_initialize_rss_mapping(struct ix_softc *);
140 int	ixgbe_rxfill(struct rx_ring *);
141 void	ixgbe_rxrefill(void *);
142 
143 void	ixgbe_enable_intr(struct ix_softc *);
144 void	ixgbe_disable_intr(struct ix_softc *);
145 void	ixgbe_update_stats_counters(struct ix_softc *);
146 int	ixgbe_txeof(struct tx_ring *);
147 int	ixgbe_rxeof(struct ix_queue *);
148 void	ixgbe_rx_checksum(uint32_t, struct mbuf *, uint32_t);
149 void	ixgbe_iff(struct ix_softc *);
150 #ifdef IX_DEBUG
151 void	ixgbe_print_hw_stats(struct ix_softc *);
152 #endif
153 void	ixgbe_update_link_status(struct ix_softc *);
154 int	ixgbe_get_buf(struct rx_ring *, int);
155 int	ixgbe_encap(struct tx_ring *, struct mbuf *);
156 int	ixgbe_dma_malloc(struct ix_softc *, bus_size_t,
157 		    struct ixgbe_dma_alloc *, int);
158 void	ixgbe_dma_free(struct ix_softc *, struct ixgbe_dma_alloc *);
159 int	ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *, uint32_t *,
160 	    uint32_t *);
161 int	ixgbe_tso_setup(struct tx_ring *, struct mbuf *, uint32_t *,
162 	    uint32_t *);
163 void	ixgbe_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
164 void	ixgbe_configure_ivars(struct ix_softc *);
165 uint8_t	*ixgbe_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
166 
167 void	ixgbe_setup_vlan_hw_support(struct ix_softc *);
168 
169 /* Support for pluggable optic modules */
170 void	ixgbe_handle_mod(struct ix_softc *);
171 void	ixgbe_handle_msf(struct ix_softc *);
172 void	ixgbe_handle_phy(struct ix_softc *);
173 
174 /* Legacy (single vector) interrupt handler */
175 int	ixgbe_intr(void *);
176 void	ixgbe_enable_queue(struct ix_softc *, uint32_t);
177 void	ixgbe_disable_queue(struct ix_softc *, uint32_t);
178 void	ixgbe_rearm_queue(struct ix_softc *, uint32_t);
179 
180 /*********************************************************************
181  *  OpenBSD Device Interface Entry Points
182  *********************************************************************/
183 
184 struct cfdriver ix_cd = {
185 	NULL, "ix", DV_IFNET
186 };
187 
188 struct cfattach ix_ca = {
189 	sizeof(struct ix_softc), ixgbe_probe, ixgbe_attach, ixgbe_detach
190 };
191 
192 int ixgbe_smart_speed = ixgbe_smart_speed_on;
193 
194 /*********************************************************************
195  *  Device identification routine
196  *
197  *  ixgbe_probe determines whether the driver should attach to the
198  *  adapter, based on the PCI vendor/device ID of the adapter.
199  *
200  *  return nonzero if the adapter is supported, 0 otherwise
201  *********************************************************************/
202 
203 int
204 ixgbe_probe(struct device *parent, void *match, void *aux)
205 {
206 	INIT_DEBUGOUT("ixgbe_probe: begin");
207 
208 	return (pci_matchbyid((struct pci_attach_args *)aux, ixgbe_devices,
209 	    nitems(ixgbe_devices)));
210 }
211 
212 /*********************************************************************
213  *  Device initialization routine
214  *
215  *  The attach entry point is called when the driver is being loaded.
216  *  This routine identifies the type of hardware, allocates all resources
217  *  and initializes the hardware.
218  *
219  *  Returns nothing; on failure any resources already allocated are freed.
220  *********************************************************************/
221 
222 void
223 ixgbe_attach(struct device *parent, struct device *self, void *aux)
224 {
225 	struct pci_attach_args	*pa = (struct pci_attach_args *)aux;
226 	struct ix_softc		*sc = (struct ix_softc *)self;
227 	int			 error = 0;
228 	uint16_t		 csum;
229 	uint32_t		 ctrl_ext;
230 	struct ixgbe_hw		*hw = &sc->hw;
231 
232 	INIT_DEBUGOUT("ixgbe_attach: begin");
233 
234 	sc->osdep.os_sc = sc;
235 	sc->osdep.os_pa = *pa;
236 
237 	rw_init(&sc->sfflock, "ixsff");
238 
239 	/* Set up the timer callout */
240 	timeout_set(&sc->timer, ixgbe_local_timer, sc);
241 	timeout_set(&sc->rx_refill, ixgbe_rxrefill, sc);
242 
243 	/* Determine hardware revision */
244 	ixgbe_identify_hardware(sc);
245 
246 	/* Set the default number of TX and RX descriptors */
247 	sc->num_tx_desc = DEFAULT_TXD;
248 	sc->num_rx_desc = DEFAULT_RXD;
249 
250 	/* Do base PCI setup - map BAR0 */
251 	if (ixgbe_allocate_pci_resources(sc))
252 		goto err_out;
253 
254 	/* Allocate our TX/RX Queues */
255 	if (ixgbe_allocate_queues(sc))
256 		goto err_out;
257 
258 	/* Allocate multicast array memory. */
259 	sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
260 	    MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
261 	if (sc->mta == NULL) {
262 		printf(": Can not allocate multicast setup array\n");
263 		goto err_late;
264 	}
265 
266 	/* Initialize the shared code */
267 	error = ixgbe_init_shared_code(hw);
268 	if (error) {
269 		printf(": Unable to initialize the shared code\n");
270 		goto err_late;
271 	}
272 
273 	/* Make sure we have a good EEPROM before we read from it */
274 	if (sc->hw.eeprom.ops.validate_checksum(&sc->hw, &csum) < 0) {
275 		printf(": The EEPROM Checksum Is Not Valid\n");
276 		goto err_late;
277 	}
278 
279 	error = ixgbe_init_hw(hw);
280 	if (error == IXGBE_ERR_EEPROM_VERSION) {
281 		printf(": This device is a pre-production adapter/"
282 		    "LOM.  Please be aware there may be issues associated "
283 		    "with your hardware.\nIf you are experiencing problems "
284 		    "please contact your Intel or hardware representative "
285 		    "who provided you with this hardware.\n");
286 	} else if (error && (error != IXGBE_ERR_SFP_NOT_PRESENT &&
287 	    error != IXGBE_ERR_SFP_NOT_SUPPORTED)) {
288 		printf(": Hardware Initialization Failure\n");
289 		goto err_late;
290 	}
291 
292 	bcopy(sc->hw.mac.addr, sc->arpcom.ac_enaddr,
293 	    IXGBE_ETH_LENGTH_OF_ADDRESS);
294 
295 	error = ixgbe_allocate_legacy(sc);
296 	if (error)
297 		goto err_late;
298 
299 	/* Enable the optics for 82599 SFP+ fiber */
300 	if (sc->hw.mac.ops.enable_tx_laser)
301 		sc->hw.mac.ops.enable_tx_laser(&sc->hw);
302 
303 	/* Enable power to the phy */
304 	if (hw->phy.ops.set_phy_power)
305 		hw->phy.ops.set_phy_power(&sc->hw, TRUE);
306 
307 	/* Setup OS specific network interface */
308 	ixgbe_setup_interface(sc);
309 
310 	/* Initialize statistics */
311 	ixgbe_update_stats_counters(sc);
312 
313 	/* Get the PCI-E bus info and determine LAN ID */
314 	hw->mac.ops.get_bus_info(hw);
315 
316 	/* Set an initial default flow control value */
317 	sc->fc = ixgbe_fc_full;
318 
319 	/* let hardware know driver is loaded */
320 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
321 	ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
322 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
323 
324 	printf(", address %s\n", ether_sprintf(sc->hw.mac.addr));
325 
326 	INIT_DEBUGOUT("ixgbe_attach: end");
327 	return;
328 
329 err_late:
330 	ixgbe_free_transmit_structures(sc);
331 	ixgbe_free_receive_structures(sc);
332 err_out:
333 	ixgbe_free_pci_resources(sc);
334 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
335 	    MAX_NUM_MULTICAST_ADDRESSES);
336 }
337 
338 /*********************************************************************
339  *  Device removal routine
340  *
341  *  The detach entry point is called when the driver is being removed.
342  *  This routine stops the adapter and deallocates all the resources
343  *  that were allocated for driver operation.
344  *
345  *  return 0 on success, positive on failure
346  *********************************************************************/
347 
348 int
349 ixgbe_detach(struct device *self, int flags)
350 {
351 	struct ix_softc *sc = (struct ix_softc *)self;
352 	struct ifnet *ifp = &sc->arpcom.ac_if;
353 	uint32_t	ctrl_ext;
354 
355 	INIT_DEBUGOUT("ixgbe_detach: begin");
356 
357 	ixgbe_stop(sc);
358 
359 	/* let hardware know driver is unloading */
360 	ctrl_ext = IXGBE_READ_REG(&sc->hw, IXGBE_CTRL_EXT);
361 	ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
362 	IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
363 
364 	ether_ifdetach(ifp);
365 	if_detach(ifp);
366 
367 	timeout_del(&sc->timer);
368 	timeout_del(&sc->rx_refill);
369 	ixgbe_free_pci_resources(sc);
370 
371 	ixgbe_free_transmit_structures(sc);
372 	ixgbe_free_receive_structures(sc);
373 	free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
374 	    MAX_NUM_MULTICAST_ADDRESSES);
375 
376 	return (0);
377 }
378 
379 /*********************************************************************
380  *  Transmit entry point
381  *
382  *  ixgbe_start is called by the stack to initiate a transmit.
383  *  The driver will remain in this routine as long as there are
384  *  packets to transmit and transmit resources are available.
385  *  In case resources are not available stack is notified and
386  *  the packet is requeued.
387  **********************************************************************/
388 
389 void
390 ixgbe_start(struct ifqueue *ifq)
391 {
392 	struct ifnet		*ifp = ifq->ifq_if;
393 	struct ix_softc		*sc = ifp->if_softc;
394 	struct tx_ring		*txr = sc->tx_rings;
395 	struct mbuf  		*m_head;
396 	unsigned int		 head, free, used;
397 	int			 post = 0;
398 
399 	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(ifq))
400 		return;
401 	if (!sc->link_up)
402 		return;
403 
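	/*
	 * Count the free descriptors from the producer index
	 * (next_avail_desc) back to the consumer index (next_to_clean),
	 * modulo the ring size.  For example, with a 256-entry ring,
	 * head == 200 and next_to_clean == 10, there are still
	 * 10 + 256 - 200 = 66 descriptors free.
	 */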
404 	head = txr->next_avail_desc;
405 	free = txr->next_to_clean;
406 	if (free <= head)
407 		free += sc->num_tx_desc;
408 	free -= head;
409 
410 	membar_consumer();
411 
412 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
413 	    0, txr->txdma.dma_map->dm_mapsize,
414 	    BUS_DMASYNC_POSTWRITE);
415 
416 	for (;;) {
417 		/* Check that we have the minimal number of TX descriptors. */
418 		if (free <= IXGBE_TX_OP_THRESHOLD) {
419 			ifq_set_oactive(ifq);
420 			break;
421 		}
422 
423 		m_head = ifq_dequeue(ifq);
424 		if (m_head == NULL)
425 			break;
426 
427 		used = ixgbe_encap(txr, m_head);
428 		if (used == 0) {
429 			m_freem(m_head);
430 			continue;
431 		}
432 
433 		free -= used;
434 
435 #if NBPFILTER > 0
436 		if (ifp->if_bpf)
437 			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
438 #endif
439 
440 		/* Set timeout in case hardware has problems transmitting */
441 		txr->watchdog_timer = IXGBE_TX_TIMEOUT;
442 		ifp->if_timer = IXGBE_TX_TIMEOUT;
443 
444 		post = 1;
445 	}
446 
447 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
448 	    0, txr->txdma.dma_map->dm_mapsize,
449 	    BUS_DMASYNC_PREWRITE);
450 
451 	/*
452 	 * Advance the Transmit Descriptor Tail (TDT); this tells the
453 	 * hardware that these frames are available to transmit.
454 	 */
455 	if (post)
456 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TDT(txr->me),
457 		    txr->next_avail_desc);
458 }
459 
460 /*********************************************************************
461  *  Ioctl entry point
462  *
463  *  ixgbe_ioctl is called when the user wants to configure the
464  *  interface.
465  *
466  *  return 0 on success, positive on failure
467  **********************************************************************/
468 
469 int
470 ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
471 {
472 	struct ix_softc	*sc = ifp->if_softc;
473 	struct ifreq	*ifr = (struct ifreq *) data;
474 	int		s, error = 0;
475 
476 	s = splnet();
477 
478 	switch (command) {
479 	case SIOCSIFADDR:
480 		IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
481 		ifp->if_flags |= IFF_UP;
482 		if (!(ifp->if_flags & IFF_RUNNING))
483 			ixgbe_init(sc);
484 		break;
485 
486 	case SIOCSIFFLAGS:
487 		IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
488 		if (ifp->if_flags & IFF_UP) {
489 			if (ifp->if_flags & IFF_RUNNING)
490 				error = ENETRESET;
491 			else
492 				ixgbe_init(sc);
493 		} else {
494 			if (ifp->if_flags & IFF_RUNNING)
495 				ixgbe_stop(sc);
496 		}
497 		break;
498 
499 	case SIOCSIFMEDIA:
500 	case SIOCGIFMEDIA:
501 		IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
502 		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
503 		break;
504 
505 	case SIOCGIFRXR:
506 		error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
507 		break;
508 
509 	case SIOCGIFSFFPAGE:
510 		error = rw_enter(&sc->sfflock, RW_WRITE|RW_INTR);
511 		if (error != 0)
512 			break;
513 
514 		error = ixgbe_get_sffpage(sc, (struct if_sffpage *)data);
515 		rw_exit(&sc->sfflock);
516 		break;
517 
518 	default:
519 		error = ether_ioctl(ifp, &sc->arpcom, command, data);
520 	}
521 
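	/*
	 * ENETRESET means the interface is already running but its
	 * address filters changed; reprogram the RX filters instead of
	 * doing a full reinitialization.
	 */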
522 	if (error == ENETRESET) {
523 		if (ifp->if_flags & IFF_RUNNING) {
524 			ixgbe_disable_intr(sc);
525 			ixgbe_iff(sc);
526 			ixgbe_enable_intr(sc);
527 		}
528 		error = 0;
529 	}
530 
531 	splx(s);
532 	return (error);
533 }
534 
535 int
536 ixgbe_get_sffpage(struct ix_softc *sc, struct if_sffpage *sff)
537 {
538 	struct ixgbe_hw *hw = &sc->hw;
539 	uint32_t swfw_mask = hw->phy.phy_semaphore_mask;
540 	uint8_t page;
541 	size_t i;
542 	int error = EIO;
543 
544 	if (hw->phy.type == ixgbe_phy_fw)
545 		return (ENODEV);
546 
547 	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
548 		return (EBUSY); /* XXX */
549 
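	/*
	 * Offset 127 of the module EEPROM is the page-select byte.
	 * Remember the page that is currently selected, switch to the
	 * requested page for the reads below and restore the original
	 * page afterwards.
	 */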
550 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
551 		if (hw->phy.ops.read_i2c_byte_unlocked(hw, 127,
552 		    IFSFF_ADDR_EEPROM, &page))
553 			goto error;
554 		if (page != sff->sff_page &&
555 		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
556 		    IFSFF_ADDR_EEPROM, sff->sff_page))
557 			goto error;
558 	}
559 
560 	for (i = 0; i < sizeof(sff->sff_data); i++) {
561 		if (hw->phy.ops.read_i2c_byte_unlocked(hw, i,
562 		    sff->sff_addr, &sff->sff_data[i]))
563 			goto error;
564 	}
565 
566 	if (sff->sff_addr == IFSFF_ADDR_EEPROM) {
567 		if (page != sff->sff_page &&
568 		    hw->phy.ops.write_i2c_byte_unlocked(hw, 127,
569 		    IFSFF_ADDR_EEPROM, page))
570 			goto error;
571 	}
572 
573 	error = 0;
574 error:
575 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
576 	return (error);
577 }
578 
579 int
580 ixgbe_rxrinfo(struct ix_softc *sc, struct if_rxrinfo *ifri)
581 {
582 	struct if_rxring_info *ifr, ifr1;
583 	struct rx_ring *rxr;
584 	int error, i;
585 	u_int n = 0;
586 
587 	if (sc->num_queues > 1) {
588 		if ((ifr = mallocarray(sc->num_queues, sizeof(*ifr), M_DEVBUF,
589 		    M_WAITOK | M_ZERO)) == NULL)
590 			return (ENOMEM);
591 	} else
592 		ifr = &ifr1;
593 
594 	for (i = 0; i < sc->num_queues; i++) {
595 		rxr = &sc->rx_rings[i];
596 		ifr[n].ifr_size = sc->rx_mbuf_sz;
597 		snprintf(ifr[n].ifr_name, sizeof(ifr[n].ifr_name), "/%d", i);
598 		ifr[n].ifr_info = rxr->rx_ring;
599 		n++;
600 	}
601 
602 	error = if_rxr_info_ioctl(ifri, sc->num_queues, ifr);
603 
604 	if (sc->num_queues > 1)
605 		free(ifr, M_DEVBUF, sc->num_queues * sizeof(*ifr));
606 	return (error);
607 }
608 
609 /*********************************************************************
610  *  Watchdog entry point
611  *
612  **********************************************************************/
613 
614 void
615 ixgbe_watchdog(struct ifnet * ifp)
616 {
617 	struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
618 	struct tx_ring *txr = sc->tx_rings;
619 	struct ixgbe_hw *hw = &sc->hw;
620 	int		tx_hang = FALSE;
621 	int		i;
622 
623 	/*
624 	 * The timer is set to IXGBE_TX_TIMEOUT (5) every time ixgbe_start()
625 	 * queues a packet; it is reset to 0 once all descriptors are clean.
626 	 */
627 	for (i = 0; i < sc->num_queues; i++, txr++) {
628 		if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
629 			continue;
630 		else {
631 			tx_hang = TRUE;
632 			break;
633 		}
634 	}
635 	if (tx_hang == FALSE)
636 		return;
637 
638 	/*
639 	 * If we are in this routine because of pause frames, then don't
640 	 * reset the hardware.
641 	 */
642 	if (!(IXGBE_READ_REG(hw, IXGBE_TFCS) & IXGBE_TFCS_TXON)) {
643 		for (i = 0; i < sc->num_queues; i++, txr++)
644 			txr->watchdog_timer = IXGBE_TX_TIMEOUT;
645 		ifp->if_timer = IXGBE_TX_TIMEOUT;
646 		return;
647 	}
648 
649 
650 	printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
651 	for (i = 0; i < sc->num_queues; i++, txr++) {
652 		printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
653 		    IXGBE_READ_REG(hw, IXGBE_TDH(i)),
654 		    IXGBE_READ_REG(hw, IXGBE_TDT(i)));
655 		printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
656 		    i, txr->next_to_clean);
657 	}
658 	ifp->if_flags &= ~IFF_RUNNING;
659 	sc->watchdog_events++;
660 
661 	ixgbe_init(sc);
662 }
663 
664 /*********************************************************************
665  *  Init entry point
666  *
667  *  This routine is used in two ways. It is used by the stack as the
668  *  init entry point in the network interface structure. It is also
669  *  used by the driver as a hw/sw initialization routine to get to a
670  *  consistent state.
671  *
672  *  Returns nothing; errors are reported with printf.
673  **********************************************************************/
674 #define IXGBE_MHADD_MFS_SHIFT 16
675 
676 void
677 ixgbe_init(void *arg)
678 {
679 	struct ix_softc	*sc = (struct ix_softc *)arg;
680 	struct ifnet	*ifp = &sc->arpcom.ac_if;
681 	struct rx_ring	*rxr = sc->rx_rings;
682 	uint32_t	 k, txdctl, rxdctl, rxctrl, mhadd, itr;
683 	int		 i, s, err;
684 
685 	INIT_DEBUGOUT("ixgbe_init: begin");
686 
687 	s = splnet();
688 
689 	ixgbe_stop(sc);
690 
691 	/* reprogram the RAR[0] in case user changed it. */
692 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
693 
694 	/* Get the latest mac address, User can use a LAA */
695 	bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
696 	      IXGBE_ETH_LENGTH_OF_ADDRESS);
697 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, 1);
698 	sc->hw.addr_ctrl.rar_used_count = 1;
699 
700 	/* Prepare transmit descriptors and buffers */
701 	if (ixgbe_setup_transmit_structures(sc)) {
702 		printf("%s: Could not setup transmit structures\n",
703 		    ifp->if_xname);
704 		ixgbe_stop(sc);
705 		splx(s);
706 		return;
707 	}
708 
709 	ixgbe_init_hw(&sc->hw);
710 	ixgbe_initialize_transmit_units(sc);
711 
712 	/* Use 2k clusters, even for jumbo frames */
713 	sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
714 
715 	/* Prepare receive descriptors and buffers */
716 	if (ixgbe_setup_receive_structures(sc)) {
717 		printf("%s: Could not setup receive structures\n",
718 		    ifp->if_xname);
719 		ixgbe_stop(sc);
720 		splx(s);
721 		return;
722 	}
723 
724 	/* Configure RX settings */
725 	ixgbe_initialize_receive_units(sc);
726 
727 	/* Enable SDP & MSIX interrupts based on adapter */
728 	ixgbe_config_gpie(sc);
729 
730 	/* Program promiscuous mode and multicast filters. */
731 	ixgbe_iff(sc);
732 
733 	/* Set MRU size */
734 	mhadd = IXGBE_READ_REG(&sc->hw, IXGBE_MHADD);
735 	mhadd &= ~IXGBE_MHADD_MFS_MASK;
736 	mhadd |= sc->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
737 	IXGBE_WRITE_REG(&sc->hw, IXGBE_MHADD, mhadd);
738 
739 	/* Now enable all the queues */
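	/*
	 * TXDCTL threshold layout (per the 82599 datasheet): PTHRESH in
	 * bits 6:0, HTHRESH in bits 14:8 and WTHRESH in bits 22:16,
	 * hence the shifts by 0, 8 and 16 below.
	 */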
740 	for (i = 0; i < sc->num_queues; i++) {
741 		txdctl = IXGBE_READ_REG(&sc->hw, IXGBE_TXDCTL(i));
742 		txdctl |= IXGBE_TXDCTL_ENABLE;
743 		/* Set WTHRESH to 8, burst writeback */
744 		txdctl |= (8 << 16);
745 		/*
746 		 * When the internal queue falls below PTHRESH (16),
747 		 * start prefetching as long as there are at least
748 		 * HTHRESH (1) buffers ready.
749 		 */
750 		txdctl |= (16 << 0) | (1 << 8);
751 		IXGBE_WRITE_REG(&sc->hw, IXGBE_TXDCTL(i), txdctl);
752 	}
753 
754 	for (i = 0; i < sc->num_queues; i++) {
755 		rxdctl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
756 		if (sc->hw.mac.type == ixgbe_mac_82598EB) {
757 			/*
758 			 * PTHRESH = 21
759 			 * HTHRESH = 4
760 			 * WTHRESH = 8
761 			 */
762 			rxdctl &= ~0x3FFFFF;
763 			rxdctl |= 0x080420;
764 		}
765 		rxdctl |= IXGBE_RXDCTL_ENABLE;
766 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), rxdctl);
767 		for (k = 0; k < 10; k++) {
768 			if (IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i)) &
769 			    IXGBE_RXDCTL_ENABLE)
770 				break;
771 			else
772 				msec_delay(1);
773 		}
774 		IXGBE_WRITE_FLUSH(&sc->hw);
775 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(i), rxr->last_desc_filled);
776 	}
777 
778 	/* Set up VLAN support and filter */
779 	ixgbe_setup_vlan_hw_support(sc);
780 
781 	/* Enable Receive engine */
782 	rxctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXCTRL);
783 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
784 		rxctrl |= IXGBE_RXCTRL_DMBYPS;
785 	rxctrl |= IXGBE_RXCTRL_RXEN;
786 	sc->hw.mac.ops.enable_rx_dma(&sc->hw, rxctrl);
787 
788 	timeout_add_sec(&sc->timer, 1);
789 
790 	/* Set up MSI/X routing */
791 	if (sc->msix > 1) {
792 		ixgbe_configure_ivars(sc);
793 		/* Set up auto-mask */
794 		if (sc->hw.mac.type == ixgbe_mac_82598EB)
795 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
796 		else {
797 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
798 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
799 		}
800 	} else {  /* Simple settings for Legacy/MSI */
801 		ixgbe_set_ivar(sc, 0, 0, 0);
802 		ixgbe_set_ivar(sc, 0, 0, 1);
803 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
804 	}
805 
806 	/* Check on any SFP devices that need to be kick-started */
807 	if (sc->hw.phy.type == ixgbe_phy_none) {
808 		err = sc->hw.phy.ops.identify(&sc->hw);
809 		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
810 			printf("Unsupported SFP+ module type was detected.\n");
811 			splx(s);
812 			return;
813 		}
814 	}
815 
816 	/* Setup interrupt moderation */
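	/*
	 * The EITR interval field occupies bits 15:3 and is expressed in
	 * roughly 2 microsecond units, so the target rate converts as
	 * (10^6 / rate / 2) << 3 == 4000000 / rate, with the low three
	 * bits masked off.  Assuming IXGBE_INTS_PER_SEC is 8000, this
	 * yields 496, i.e. roughly a 124us interval.
	 */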
817 	itr = (4000000 / IXGBE_INTS_PER_SEC) & 0xff8;
818 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
819 		itr |= IXGBE_EITR_LLI_MOD | IXGBE_EITR_CNT_WDIS;
820 	IXGBE_WRITE_REG(&sc->hw, IXGBE_EITR(0), itr);
821 
822 	/* Enable power to the phy */
823 	if (sc->hw.phy.ops.set_phy_power)
824 		sc->hw.phy.ops.set_phy_power(&sc->hw, TRUE);
825 
826 	/* Config/Enable Link */
827 	ixgbe_config_link(sc);
828 
829 	/* Hardware Packet Buffer & Flow Control setup */
830 	ixgbe_config_delay_values(sc);
831 
832 	/* Initialize the FC settings */
833 	sc->hw.mac.ops.start_hw(&sc->hw);
834 
835 	/* And now turn on interrupts */
836 	ixgbe_enable_intr(sc);
837 
838 	/* Now inform the stack we're ready */
839 	ifp->if_flags |= IFF_RUNNING;
840 	ifq_clr_oactive(&ifp->if_snd);
841 
842 	splx(s);
843 }
844 
845 void
846 ixgbe_config_gpie(struct ix_softc *sc)
847 {
848 	struct ixgbe_hw	*hw = &sc->hw;
849 	uint32_t gpie;
850 
851 	gpie = IXGBE_READ_REG(&sc->hw, IXGBE_GPIE);
852 
853 	/* Fan Failure Interrupt */
854 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
855 		gpie |= IXGBE_SDP1_GPIEN;
856 
857 	if (sc->hw.mac.type == ixgbe_mac_82599EB) {
858 		/* Add for Module detection */
859 		gpie |= IXGBE_SDP2_GPIEN;
860 
861 		/* Media ready */
862 		if (hw->device_id != IXGBE_DEV_ID_82599_QSFP_SF_QP)
863 			gpie |= IXGBE_SDP1_GPIEN;
864 
865 		/*
866 		 * Set LL interval to max to reduce the number of low latency
867 		 * interrupts hitting the card when the ring is getting full.
868 		 */
869 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
870 	}
871 
872 	if (sc->hw.mac.type == ixgbe_mac_X540 ||
873 	    sc->hw.mac.type == ixgbe_mac_X550EM_x ||
874 	    sc->hw.mac.type == ixgbe_mac_X550EM_a) {
875 		/*
876 		 * Thermal Failure Detection (X540)
877 		 * Link Detection (X552 SFP+, X552/X557-AT)
878 		 */
879 		gpie |= IXGBE_SDP0_GPIEN_X540;
880 
881 		/*
882 		 * Set LL interval to max to reduce the number of low latency
883 		 * interrupts hitting the card when the ring is getting full.
884 		 */
885 		gpie |= 0xf << IXGBE_GPIE_LLI_DELAY_SHIFT;
886 	}
887 
888 	if (sc->msix > 1) {
889 		/* Enable Enhanced MSIX mode */
890 		gpie |= IXGBE_GPIE_MSIX_MODE;
891 		gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
892 		    IXGBE_GPIE_OCD;
893 	}
894 
895 	IXGBE_WRITE_REG(&sc->hw, IXGBE_GPIE, gpie);
896 }
897 
898 /*
899  * Requires sc->max_frame_size to be set.
900  */
901 void
902 ixgbe_config_delay_values(struct ix_softc *sc)
903 {
904 	struct ixgbe_hw *hw = &sc->hw;
905 	uint32_t rxpb, frame, size, tmp;
906 
907 	frame = sc->max_frame_size;
908 
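	/*
	 * IXGBE_DV()/IXGBE_DV_X540() compute the delay value: how much
	 * packet buffer, in bits, must stay free to absorb traffic in
	 * flight while a pause frame takes effect.  IXGBE_BT2KB()
	 * converts that to kilobytes; the high water mark is the RX
	 * packet buffer size minus that headroom, and the low water
	 * mark is roughly the fill level at which traffic may resume.
	 */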
909 	/* Calculate High Water */
910 	switch (hw->mac.type) {
911 	case ixgbe_mac_X540:
912 	case ixgbe_mac_X550:
913 	case ixgbe_mac_X550EM_x:
914 	case ixgbe_mac_X550EM_a:
915 		tmp = IXGBE_DV_X540(frame, frame);
916 		break;
917 	default:
918 		tmp = IXGBE_DV(frame, frame);
919 		break;
920 	}
921 	size = IXGBE_BT2KB(tmp);
922 	rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
923 	hw->fc.high_water[0] = rxpb - size;
924 
925 	/* Now calculate Low Water */
926 	switch (hw->mac.type) {
927 	case ixgbe_mac_X540:
928 	case ixgbe_mac_X550:
929 	case ixgbe_mac_X550EM_x:
930 	case ixgbe_mac_X550EM_a:
931 		tmp = IXGBE_LOW_DV_X540(frame);
932 		break;
933 	default:
934 		tmp = IXGBE_LOW_DV(frame);
935 		break;
936 	}
937 	hw->fc.low_water[0] = IXGBE_BT2KB(tmp);
938 
939 	hw->fc.requested_mode = sc->fc;
940 	hw->fc.pause_time = IXGBE_FC_PAUSE;
941 	hw->fc.send_xon = TRUE;
942 }
943 
944 /*
945  * MSIX Interrupt Handlers
946  */
947 void
948 ixgbe_enable_queue(struct ix_softc *sc, uint32_t vector)
949 {
950 	uint64_t queue = 1ULL << vector;
951 	uint32_t mask;
952 
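	/*
	 * The 82598 has a single EIMS register; later MACs spread the
	 * per-vector bits over EIMS_EX(0) (vectors 0-31) and EIMS_EX(1)
	 * (vectors 32-63), so the 64-bit queue mask is written in halves.
	 */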
953 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
954 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
955 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS, mask);
956 	} else {
957 		mask = (queue & 0xFFFFFFFF);
958 		if (mask)
959 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(0), mask);
960 		mask = (queue >> 32);
961 		if (mask)
962 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMS_EX(1), mask);
963 	}
964 }
965 
966 void
967 ixgbe_disable_queue(struct ix_softc *sc, uint32_t vector)
968 {
969 	uint64_t queue = 1ULL << vector;
970 	uint32_t mask;
971 
972 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
973 		mask = (IXGBE_EIMS_RTX_QUEUE & queue);
974 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, mask);
975 	} else {
976 		mask = (queue & 0xFFFFFFFF);
977 		if (mask)
978 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), mask);
979 		mask = (queue >> 32);
980 		if (mask)
981 			IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), mask);
982 	}
983 }
984 
985 /*********************************************************************
986  *
987  *  Legacy Interrupt Service routine
988  *
989  **********************************************************************/
990 
991 int
992 ixgbe_intr(void *arg)
993 {
994 	struct ix_softc	*sc = (struct ix_softc *)arg;
995 	struct ix_queue *que = sc->queues;
996 	struct ifnet	*ifp = &sc->arpcom.ac_if;
997 	struct tx_ring	*txr = sc->tx_rings;
998 	struct ixgbe_hw	*hw = &sc->hw;
999 	uint32_t	 reg_eicr, mod_mask, msf_mask;
1000 	int		 i, refill = 0;
1001 
1002 	reg_eicr = IXGBE_READ_REG(&sc->hw, IXGBE_EICR);
1003 	if (reg_eicr == 0) {
1004 		ixgbe_enable_intr(sc);
1005 		return (0);
1006 	}
1007 
1008 	if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1009 		ixgbe_rxeof(que);
1010 		ixgbe_txeof(txr);
1011 		refill = 1;
1012 	}
1013 
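	/*
	 * If the interface is running, try to refill the RX ring now;
	 * when no new buffers could be posted, retry later from the
	 * rx_refill timeout instead of leaving the ring empty.
	 */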
1014 	if (refill) {
1015 		if (ixgbe_rxfill(que->rxr)) {
1016 			/* Advance the Rx Queue "Tail Pointer" */
1017 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
1018 			    que->rxr->last_desc_filled);
1019 		} else
1020 			timeout_add(&sc->rx_refill, 1);
1021 	}
1022 
1023 	/* Link status change */
1024 	if (reg_eicr & IXGBE_EICR_LSC) {
1025 		KERNEL_LOCK();
1026 		ixgbe_update_link_status(sc);
1027 		KERNEL_UNLOCK();
1028 		ifq_start(&ifp->if_snd);
1029 	}
1030 
1031 	if (hw->mac.type != ixgbe_mac_82598EB) {
1032 		if (reg_eicr & IXGBE_EICR_ECC) {
1033 			printf("%s: CRITICAL: ECC ERROR!! "
1034 			    "Please Reboot!!\n", sc->dev.dv_xname);
1035 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
1036 		}
1037 		/* Check for over temp condition */
1038 		if (reg_eicr & IXGBE_EICR_TS) {
1039 			printf("%s: CRITICAL: OVER TEMP!! "
1040 			    "PHY IS SHUT DOWN!!\n", ifp->if_xname);
1041 			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
1042 		}
1043 	}
1044 
1045 	/* Pluggable optics-related interrupt */
1046 	if (ixgbe_is_sfp(hw)) {
1047 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) {
1048 			mod_mask = IXGBE_EICR_GPI_SDP0_X540;
1049 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1050 		} else if (hw->mac.type == ixgbe_mac_X540 ||
1051 		    hw->mac.type == ixgbe_mac_X550 ||
1052 		    hw->mac.type == ixgbe_mac_X550EM_x) {
1053 			mod_mask = IXGBE_EICR_GPI_SDP2_X540;
1054 			msf_mask = IXGBE_EICR_GPI_SDP1_X540;
1055 		} else {
1056 			mod_mask = IXGBE_EICR_GPI_SDP2;
1057 			msf_mask = IXGBE_EICR_GPI_SDP1;
1058 		}
1059 		if (reg_eicr & mod_mask) {
1060 			/* Clear the interrupt */
1061 			IXGBE_WRITE_REG(hw, IXGBE_EICR, mod_mask);
1062 			KERNEL_LOCK();
1063 			ixgbe_handle_mod(sc);
1064 			KERNEL_UNLOCK();
1065 		} else if ((hw->phy.media_type != ixgbe_media_type_copper) &&
1066 		    (reg_eicr & msf_mask)) {
1067 			/* Clear the interrupt */
1068 			IXGBE_WRITE_REG(hw, IXGBE_EICR, msf_mask);
1069 			KERNEL_LOCK();
1070 			ixgbe_handle_msf(sc);
1071 			KERNEL_UNLOCK();
1072 		}
1073 	}
1074 
1075 	/* Check for fan failure */
1076 	if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
1077 	    (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
1078 		printf("%s: CRITICAL: FAN FAILURE!! "
1079 		    "REPLACE IMMEDIATELY!!\n", ifp->if_xname);
1080 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1081 	}
1082 
1083 	/* External PHY interrupt */
1084 	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
1085 	    (reg_eicr & IXGBE_EICR_GPI_SDP0_X540)) {
1086 		/* Clear the interrupt */
1087 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0_X540);
1088 		KERNEL_LOCK();
1089 		ixgbe_handle_phy(sc);
1090 		KERNEL_UNLOCK();
1091 	}
1092 
1093 	for (i = 0; i < sc->num_queues; i++, que++)
1094 		ixgbe_enable_queue(sc, que->msix);
1095 
1096 	return (1);
1097 }
1098 
1099 /*********************************************************************
1100  *
1101  *  Media Ioctl callback
1102  *
1103  *  This routine is called whenever the user queries the status of
1104  *  the interface using ifconfig.
1105  *
1106  **********************************************************************/
1107 void
1108 ixgbe_media_status(struct ifnet * ifp, struct ifmediareq *ifmr)
1109 {
1110 	struct ix_softc *sc = ifp->if_softc;
1111 	uint64_t layer;
1112 
1113 	ifmr->ifm_active = IFM_ETHER;
1114 	ifmr->ifm_status = IFM_AVALID;
1115 
1116 	INIT_DEBUGOUT("ixgbe_media_status: begin");
1117 	ixgbe_update_link_status(sc);
1118 
1119 	if (!LINK_STATE_IS_UP(ifp->if_link_state))
1120 		return;
1121 
1122 	ifmr->ifm_status |= IFM_ACTIVE;
1123 	layer = sc->phy_layer;
1124 
1125 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
1126 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
1127 	    layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
1128 	    layer & IXGBE_PHYSICAL_LAYER_10BASE_T) {
1129 		switch (sc->link_speed) {
1130 		case IXGBE_LINK_SPEED_10GB_FULL:
1131 			ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
1132 			break;
1133 		case IXGBE_LINK_SPEED_1GB_FULL:
1134 			ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
1135 			break;
1136 		case IXGBE_LINK_SPEED_100_FULL:
1137 			ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
1138 			break;
1139 		case IXGBE_LINK_SPEED_10_FULL:
1140 			ifmr->ifm_active |= IFM_10_T | IFM_FDX;
1141 			break;
1142 		}
1143 	}
1144 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1145 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
1146 		switch (sc->link_speed) {
1147 		case IXGBE_LINK_SPEED_10GB_FULL:
1148 			ifmr->ifm_active |= IFM_10G_SFP_CU | IFM_FDX;
1149 			break;
1150 		}
1151 	}
1152 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1153 		switch (sc->link_speed) {
1154 		case IXGBE_LINK_SPEED_10GB_FULL:
1155 			ifmr->ifm_active |= IFM_10G_LR | IFM_FDX;
1156 			break;
1157 		case IXGBE_LINK_SPEED_1GB_FULL:
1158 			ifmr->ifm_active |= IFM_1000_LX | IFM_FDX;
1159 			break;
1160 		}
1161 	}
1162 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR ||
1163 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX) {
1164 		switch (sc->link_speed) {
1165 		case IXGBE_LINK_SPEED_10GB_FULL:
1166 			ifmr->ifm_active |= IFM_10G_SR | IFM_FDX;
1167 			break;
1168 		case IXGBE_LINK_SPEED_1GB_FULL:
1169 			ifmr->ifm_active |= IFM_1000_SX | IFM_FDX;
1170 			break;
1171 		}
1172 	}
1173 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4) {
1174 		switch (sc->link_speed) {
1175 		case IXGBE_LINK_SPEED_10GB_FULL:
1176 			ifmr->ifm_active |= IFM_10G_CX4 | IFM_FDX;
1177 			break;
1178 		}
1179 	}
1180 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR) {
1181 		switch (sc->link_speed) {
1182 		case IXGBE_LINK_SPEED_10GB_FULL:
1183 			ifmr->ifm_active |= IFM_10G_KR | IFM_FDX;
1184 			break;
1185 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1186 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1187 			break;
1188 		case IXGBE_LINK_SPEED_1GB_FULL:
1189 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1190 			break;
1191 		}
1192 	} else if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4 ||
1193 	    layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX ||
1194 	    layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX) {
1195 		switch (sc->link_speed) {
1196 		case IXGBE_LINK_SPEED_10GB_FULL:
1197 			ifmr->ifm_active |= IFM_10G_KX4 | IFM_FDX;
1198 			break;
1199 		case IXGBE_LINK_SPEED_2_5GB_FULL:
1200 			ifmr->ifm_active |= IFM_2500_KX | IFM_FDX;
1201 			break;
1202 		case IXGBE_LINK_SPEED_1GB_FULL:
1203 			ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
1204 			break;
1205 		}
1206 	}
1207 
1208 	switch (sc->hw.fc.current_mode) {
1209 	case ixgbe_fc_tx_pause:
1210 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_TXPAUSE;
1211 		break;
1212 	case ixgbe_fc_rx_pause:
1213 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE;
1214 		break;
1215 	case ixgbe_fc_full:
1216 		ifmr->ifm_active |= IFM_FLOW | IFM_ETH_RXPAUSE |
1217 		    IFM_ETH_TXPAUSE;
1218 		break;
1219 	default:
1220 		ifmr->ifm_active &= ~(IFM_FLOW | IFM_ETH_RXPAUSE |
1221 		    IFM_ETH_TXPAUSE);
1222 		break;
1223 	}
1224 }
1225 
1226 /*********************************************************************
1227  *
1228  *  Media Ioctl callback
1229  *
1230  *  This routine is called when the user changes speed/duplex using
1231  *  media/mediaopt options with ifconfig.
1232  *
1233  **********************************************************************/
1234 int
1235 ixgbe_media_change(struct ifnet *ifp)
1236 {
1237 	struct ix_softc	*sc = ifp->if_softc;
1238 	struct ixgbe_hw	*hw = &sc->hw;
1239 	struct ifmedia	*ifm = &sc->media;
1240 	ixgbe_link_speed speed = 0;
1241 
1242 	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1243 		return (EINVAL);
1244 
1245 	if (hw->phy.media_type == ixgbe_media_type_backplane)
1246 		return (ENODEV);
1247 
1248 	switch (IFM_SUBTYPE(ifm->ifm_media)) {
1249 		case IFM_AUTO:
1250 		case IFM_10G_T:
1251 			speed |= IXGBE_LINK_SPEED_100_FULL;
1252 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1253 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1254 			break;
1255 		case IFM_10G_SR:
1256 		case IFM_10G_KR:
1257 		case IFM_10G_LR:
1258 		case IFM_10G_LRM:
1259 		case IFM_10G_CX4:
1260 		case IFM_10G_KX4:
1261 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1262 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1263 			break;
1264 		case IFM_10G_SFP_CU:
1265 			speed |= IXGBE_LINK_SPEED_10GB_FULL;
1266 			break;
1267 		case IFM_1000_T:
1268 			speed |= IXGBE_LINK_SPEED_100_FULL;
1269 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1270 			break;
1271 		case IFM_1000_LX:
1272 		case IFM_1000_SX:
1273 		case IFM_1000_CX:
1274 		case IFM_1000_KX:
1275 			speed |= IXGBE_LINK_SPEED_1GB_FULL;
1276 			break;
1277 		case IFM_100_TX:
1278 			speed |= IXGBE_LINK_SPEED_100_FULL;
1279 			break;
1280 		case IFM_10_T:
1281 			speed |= IXGBE_LINK_SPEED_10_FULL;
1282 			break;
1283 		default:
1284 			return (EINVAL);
1285 	}
1286 
1287 	hw->mac.autotry_restart = TRUE;
1288 	hw->mac.ops.setup_link(hw, speed, TRUE);
1289 
1290 	return (0);
1291 }
1292 
1293 /*********************************************************************
1294  *
1295  *  This routine maps the mbufs to tx descriptors, allowing the
1296  *  TX engine to transmit the packets.
1297  *  	- returns the number of descriptors used, or 0 on failure
1298  *
1299  **********************************************************************/
1300 
1301 int
1302 ixgbe_encap(struct tx_ring *txr, struct mbuf *m_head)
1303 {
1304 	struct ix_softc *sc = txr->sc;
1305 	uint32_t	olinfo_status = 0, cmd_type_len;
1306 	int             i, j, ntxc;
1307 	int		first, last = 0;
1308 	bus_dmamap_t	map;
1309 	struct ixgbe_tx_buf *txbuf;
1310 	union ixgbe_adv_tx_desc *txd = NULL;
1311 
1312 	/* Basic descriptor defines */
1313 	cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
1314 	    IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
1315 
1316 #if NVLAN > 0
1317 	if (m_head->m_flags & M_VLANTAG)
1318 		cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
1319 #endif
1320 
1321 	/*
1322 	 * Important to capture the first descriptor
1323 	 * used because it will contain the index of
1324 	 * the one we tell the hardware to report back
1325 	 */
1326 	first = txr->next_avail_desc;
1327 	txbuf = &txr->tx_buffers[first];
1328 	map = txbuf->map;
1329 
1330 	/*
1331 	 * Map the packet for DMA.
1332 	 */
1333 	switch (bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1334 	    m_head, BUS_DMA_NOWAIT)) {
1335 	case 0:
1336 		break;
1337 	case EFBIG:
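		/*
		 * EFBIG: the mbuf chain needs more segments than the
		 * DMA map allows.  Compact it with m_defrag() and retry
		 * the load once before dropping the packet.
		 */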
1338 		if (m_defrag(m_head, M_NOWAIT) == 0 &&
1339 		    bus_dmamap_load_mbuf(txr->txdma.dma_tag, map,
1340 		     m_head, BUS_DMA_NOWAIT) == 0)
1341 			break;
1342 		/* FALLTHROUGH */
1343 	default:
1344 		sc->no_tx_dma_setup++;
1345 		return (0);
1346 	}
1347 
1348 	/*
1349 	 * Set up the appropriate offload context; when one is needed it
1350 	 * becomes the first descriptor used by this packet.
1351 	 */
1352 	ntxc = ixgbe_tx_ctx_setup(txr, m_head, &cmd_type_len, &olinfo_status);
1353 	if (ntxc == -1)
1354 		goto xmit_fail;
1355 
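	/*
	 * The offload context, if any, occupies ntxc descriptors
	 * starting at next_avail_desc; the data descriptors for the
	 * DMA segments follow it, wrapping at the end of the ring.
	 */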
1356 	i = txr->next_avail_desc + ntxc;
1357 	if (i >= sc->num_tx_desc)
1358 		i -= sc->num_tx_desc;
1359 
1360 	for (j = 0; j < map->dm_nsegs; j++) {
1361 		txd = &txr->tx_base[i];
1362 
1363 		txd->read.buffer_addr = htole64(map->dm_segs[j].ds_addr);
1364 		txd->read.cmd_type_len = htole32(txr->txd_cmd |
1365 		    cmd_type_len | map->dm_segs[j].ds_len);
1366 		txd->read.olinfo_status = htole32(olinfo_status);
1367 		last = i; /* descriptor that will get completion IRQ */
1368 
1369 		if (++i == sc->num_tx_desc)
1370 			i = 0;
1371 	}
1372 
1373 	txd->read.cmd_type_len |=
1374 	    htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
1375 
1376 	bus_dmamap_sync(txr->txdma.dma_tag, map, 0, map->dm_mapsize,
1377 	    BUS_DMASYNC_PREWRITE);
1378 
1379 	/* Set the index of the descriptor that will be marked done */
1380 	txbuf->m_head = m_head;
1381 	txbuf->eop_index = last;
1382 
1383 	membar_producer();
1384 
1385 	txr->next_avail_desc = i;
1386 
1387 	++txr->tx_packets;
1388 	return (ntxc + j);
1389 
1390 xmit_fail:
1391 	bus_dmamap_unload(txr->txdma.dma_tag, txbuf->map);
1392 	return (0);
1393 }
1394 
1395 void
1396 ixgbe_iff(struct ix_softc *sc)
1397 {
1398 	struct ifnet *ifp = &sc->arpcom.ac_if;
1399 	struct arpcom *ac = &sc->arpcom;
1400 	uint32_t	fctrl;
1401 	uint8_t	*mta;
1402 	uint8_t	*update_ptr;
1403 	struct ether_multi *enm;
1404 	struct ether_multistep step;
1405 	int	mcnt = 0;
1406 
1407 	IOCTL_DEBUGOUT("ixgbe_iff: begin");
1408 
1409 	mta = sc->mta;
1410 	bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
1411 	    MAX_NUM_MULTICAST_ADDRESSES);
1412 
1413 	fctrl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
1414 	fctrl &= ~(IXGBE_FCTRL_MPE | IXGBE_FCTRL_UPE);
1415 	ifp->if_flags &= ~IFF_ALLMULTI;
1416 
1417 	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
1418 	    ac->ac_multicnt > MAX_NUM_MULTICAST_ADDRESSES) {
1419 		ifp->if_flags |= IFF_ALLMULTI;
1420 		fctrl |= IXGBE_FCTRL_MPE;
1421 		if (ifp->if_flags & IFF_PROMISC)
1422 			fctrl |= IXGBE_FCTRL_UPE;
1423 	} else {
1424 		ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
1425 		while (enm != NULL) {
1426 			bcopy(enm->enm_addrlo,
1427 			    &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
1428 			    IXGBE_ETH_LENGTH_OF_ADDRESS);
1429 			mcnt++;
1430 
1431 			ETHER_NEXT_MULTI(step, enm);
1432 		}
1433 
1434 		update_ptr = mta;
1435 		sc->hw.mac.ops.update_mc_addr_list(&sc->hw, update_ptr, mcnt,
1436 		    ixgbe_mc_array_itr, TRUE);
1437 	}
1438 
1439 	IXGBE_WRITE_REG(&sc->hw, IXGBE_FCTRL, fctrl);
1440 }
1441 
1442 /*
1443  * This is an iterator function needed by the multicast shared code.
1444  * It simply feeds the shared code routine the addresses in the
1445  * array built by ixgbe_iff(), one at a time.
1446  */
1447 uint8_t *
1448 ixgbe_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
1449 {
1450 	uint8_t *addr = *update_ptr;
1451 	uint8_t *newptr;
1452 	*vmdq = 0;
1453 
1454 	newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
1455 	*update_ptr = newptr;
1456 	return addr;
1457 }
1458 
1459 void
1460 ixgbe_local_timer(void *arg)
1461 {
1462 	struct ix_softc *sc = arg;
1463 #ifdef IX_DEBUG
1464 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1465 #endif
1466 	int		 s;
1467 
1468 	s = splnet();
1469 
1470 	ixgbe_update_stats_counters(sc);
1471 
1472 #ifdef IX_DEBUG
1473 	if ((ifp->if_flags & (IFF_RUNNING|IFF_DEBUG)) ==
1474 	    (IFF_RUNNING|IFF_DEBUG))
1475 		ixgbe_print_hw_stats(sc);
1476 #endif
1477 
1478 	timeout_add_sec(&sc->timer, 1);
1479 
1480 	splx(s);
1481 }
1482 
1483 void
1484 ixgbe_update_link_status(struct ix_softc *sc)
1485 {
1486 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1487 	int		link_state = LINK_STATE_DOWN;
1488 
1489 	ixgbe_check_link(&sc->hw, &sc->link_speed, &sc->link_up, 0);
1490 
1491 	ifp->if_baudrate = 0;
1492 	if (sc->link_up) {
1493 		link_state = LINK_STATE_FULL_DUPLEX;
1494 
1495 		switch (sc->link_speed) {
1496 		case IXGBE_LINK_SPEED_UNKNOWN:
1497 			ifp->if_baudrate = 0;
1498 			break;
1499 		case IXGBE_LINK_SPEED_100_FULL:
1500 			ifp->if_baudrate = IF_Mbps(100);
1501 			break;
1502 		case IXGBE_LINK_SPEED_1GB_FULL:
1503 			ifp->if_baudrate = IF_Gbps(1);
1504 			break;
1505 		case IXGBE_LINK_SPEED_10GB_FULL:
1506 			ifp->if_baudrate = IF_Gbps(10);
1507 			break;
1508 		}
1509 
1510 		/* Update any Flow Control changes */
1511 		sc->hw.mac.ops.fc_enable(&sc->hw);
1512 	}
1513 	if (ifp->if_link_state != link_state) {
1514 		ifp->if_link_state = link_state;
1515 		if_link_state_change(ifp);
1516 	}
1517 }
1518 
1519 
1520 /*********************************************************************
1521  *
1522  *  This routine disables all traffic on the adapter by issuing a
1523  *  global reset on the MAC and deallocates TX/RX buffers.
1524  *
1525  **********************************************************************/
1526 
1527 void
1528 ixgbe_stop(void *arg)
1529 {
1530 	struct ix_softc *sc = arg;
1531 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1532 
1533 	/* Tell the stack that the interface is no longer active */
1534 	ifp->if_flags &= ~IFF_RUNNING;
1535 
1536 	INIT_DEBUGOUT("ixgbe_stop: begin\n");
1537 	ixgbe_disable_intr(sc);
1538 
1539 	sc->hw.mac.ops.reset_hw(&sc->hw);
1540 	sc->hw.adapter_stopped = FALSE;
1541 	sc->hw.mac.ops.stop_adapter(&sc->hw);
1542 	if (sc->hw.mac.type == ixgbe_mac_82599EB)
1543 		sc->hw.mac.ops.stop_mac_link_on_d3(&sc->hw);
1544 	/* Turn off the laser */
1545 	if (sc->hw.mac.ops.disable_tx_laser)
1546 		sc->hw.mac.ops.disable_tx_laser(&sc->hw);
1547 	timeout_del(&sc->timer);
1548 	timeout_del(&sc->rx_refill);
1549 
1550 	/* reprogram the RAR[0] in case user changed it. */
1551 	ixgbe_set_rar(&sc->hw, 0, sc->hw.mac.addr, 0, IXGBE_RAH_AV);
1552 
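	/*
	 * Wait for any transmit start and interrupt processing still
	 * in flight to finish before the rings are torn down below.
	 */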
1553 	ifq_barrier(&ifp->if_snd);
1554 	intr_barrier(sc->tag);
1555 
1556 	KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
1557 
1558 	ifq_clr_oactive(&ifp->if_snd);
1559 
1560 	/* Should we really clear all structures on stop? */
1561 	ixgbe_free_transmit_structures(sc);
1562 	ixgbe_free_receive_structures(sc);
1563 }
1564 
1565 
1566 /*********************************************************************
1567  *
1568  *  Determine hardware revision.
1569  *
1570  **********************************************************************/
1571 void
1572 ixgbe_identify_hardware(struct ix_softc *sc)
1573 {
1574 	struct ixgbe_osdep	*os = &sc->osdep;
1575 	struct pci_attach_args	*pa = &os->os_pa;
1576 	uint32_t		 reg;
1577 
1578 	/* Save off the information about this board */
1579 	sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
1580 	sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
1581 
1582 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
1583 	sc->hw.revision_id = PCI_REVISION(reg);
1584 
1585 	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
1586 	sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
1587 	sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
1588 
1589 	/* We need this here to set the num_segs below */
1590 	ixgbe_set_mac_type(&sc->hw);
1591 
1592 	/* Pick up the 82599 and VF settings */
1593 	if (sc->hw.mac.type != ixgbe_mac_82598EB)
1594 		sc->hw.phy.smart_speed = ixgbe_smart_speed;
1595 	sc->num_segs = IXGBE_82599_SCATTER;
1596 }
1597 
1598 /*********************************************************************
1599  *
1600  *  Setup the Legacy or MSI Interrupt handler
1601  *
1602  **********************************************************************/
1603 int
1604 ixgbe_allocate_legacy(struct ix_softc *sc)
1605 {
1606 	struct ixgbe_osdep	*os = &sc->osdep;
1607 	struct pci_attach_args	*pa = &os->os_pa;
1608 	const char		*intrstr = NULL;
1609 	pci_chipset_tag_t	pc = pa->pa_pc;
1610 	pci_intr_handle_t	ih;
1611 
1612 	/* Allocate a single interrupt resource: MSI if possible, INTx otherwise */
1613 	if (pci_intr_map_msi(pa, &ih) != 0 &&
1614 	    pci_intr_map(pa, &ih) != 0) {
1615 		printf(": couldn't map interrupt\n");
1616 		return (ENXIO);
1617 	}
1618 
1619 #if 0
1620 	/* XXX */
1621 	/* Tasklets for Link, SFP and Multispeed Fiber */
1622 	TASK_INIT(&sc->link_task, 0, ixgbe_handle_link, sc);
1623 	TASK_INIT(&sc->mod_task, 0, ixgbe_handle_mod, sc);
1624 	TASK_INIT(&sc->msf_task, 0, ixgbe_handle_msf, sc);
1625 #endif
1626 
1627 	intrstr = pci_intr_string(pc, ih);
1628 	sc->tag = pci_intr_establish(pc, ih, IPL_NET | IPL_MPSAFE,
1629 	    ixgbe_intr, sc, sc->dev.dv_xname);
1630 	if (sc->tag == NULL) {
1631 		printf(": couldn't establish interrupt");
1632 		if (intrstr != NULL)
1633 			printf(" at %s", intrstr);
1634 		printf("\n");
1635 		return (ENXIO);
1636 	}
1637 	printf(": %s", intrstr);
1638 
1639 	/* For simplicity in the handlers */
1640 	sc->que_mask = IXGBE_EIMS_ENABLE_MASK;
1641 
1642 	return (0);
1643 }
1644 
1645 int
1646 ixgbe_allocate_pci_resources(struct ix_softc *sc)
1647 {
1648 	struct ixgbe_osdep	*os = &sc->osdep;
1649 	struct pci_attach_args	*pa = &os->os_pa;
1650 	int			 val;
1651 
1652 	val = pci_conf_read(pa->pa_pc, pa->pa_tag, PCIR_BAR(0));
1653 	if (PCI_MAPREG_TYPE(val) != PCI_MAPREG_TYPE_MEM) {
1654 		printf(": mmba is not mem space\n");
1655 		return (ENXIO);
1656 	}
1657 
1658 	if (pci_mapreg_map(pa, PCIR_BAR(0), PCI_MAPREG_MEM_TYPE(val), 0,
1659 	    &os->os_memt, &os->os_memh, &os->os_membase, &os->os_memsize, 0)) {
1660 		printf(": cannot find mem space\n");
1661 		return (ENXIO);
1662 	}
1663 	sc->hw.hw_addr = (uint8_t *)os->os_membase;
1664 
1665 	/* Legacy defaults */
1666 	sc->num_queues = 1;
1667 	sc->hw.back = os;
1668 
1669 #ifdef notyet
1670 	/* Now setup MSI or MSI/X, return us the number of supported vectors. */
1671 	sc->msix = ixgbe_setup_msix(sc);
1672 #endif
1673 
1674 	return (0);
1675 }
1676 
1677 void
1678 ixgbe_free_pci_resources(struct ix_softc * sc)
1679 {
1680 	struct ixgbe_osdep	*os = &sc->osdep;
1681 	struct pci_attach_args	*pa = &os->os_pa;
1682 	struct ix_queue *que = sc->queues;
1683 	int i;
1684 
1685 	/* Release all msix queue resources: */
1686 	for (i = 0; i < sc->num_queues; i++, que++) {
1687 		if (que->tag)
1688 			pci_intr_disestablish(pa->pa_pc, que->tag);
1689 		que->tag = NULL;
1690 	}
1691 
1692 	if (sc->tag)
1693 		pci_intr_disestablish(pa->pa_pc, sc->tag);
1694 	sc->tag = NULL;
1695 	if (os->os_membase != 0)
1696 		bus_space_unmap(os->os_memt, os->os_memh, os->os_memsize);
1697 	os->os_membase = 0;
1698 }
1699 
1700 /*********************************************************************
1701  *
1702  *  Setup networking device structure and register an interface.
1703  *
1704  **********************************************************************/
1705 void
1706 ixgbe_setup_interface(struct ix_softc *sc)
1707 {
1708 	struct ifnet   *ifp = &sc->arpcom.ac_if;
1709 
1710 	strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
1711 	ifp->if_softc = sc;
1712 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1713 	ifp->if_xflags = IFXF_MPSAFE;
1714 	ifp->if_ioctl = ixgbe_ioctl;
1715 	ifp->if_qstart = ixgbe_start;
1716 	ifp->if_timer = 0;
1717 	ifp->if_watchdog = ixgbe_watchdog;
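	/*
	 * The largest MTU we can offer is the maximum frame size minus
	 * the Ethernet header and CRC.
	 */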
1718 	ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
1719 	    ETHER_HDR_LEN - ETHER_CRC_LEN;
1720 	IFQ_SET_MAXLEN(&ifp->if_snd, sc->num_tx_desc - 1);
1721 
1722 	ifp->if_capabilities = IFCAP_VLAN_MTU;
1723 
1724 #if NVLAN > 0
1725 	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1726 #endif
1727 
1728 	ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
1729 
1730 	/*
1731 	 * Specify the media types supported by this sc and register
1732 	 * callbacks to update media and link information
1733 	 */
1734 	ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
1735 	    ixgbe_media_status);
1736 	ixgbe_add_media_types(sc);
1737 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
1738 
1739 	if_attach(ifp);
1740 	ether_ifattach(ifp);
1741 
1742 	sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
1743 }
1744 
1745 void
1746 ixgbe_add_media_types(struct ix_softc *sc)
1747 {
1748 	struct ixgbe_hw	*hw = &sc->hw;
1749 	uint64_t layer;
1750 
1751 	sc->phy_layer = hw->mac.ops.get_supported_physical_layer(hw);
1752 	layer = sc->phy_layer;
1753 
1754 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
1755 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
1756 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
1757 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1758 	if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
1759 		ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
1760 	if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
1761 	    layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
1762 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SFP_CU, 0, NULL);
1763 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
1764 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
1765 		if (hw->phy.multispeed_fiber)
1766 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_LX, 0,
1767 			    NULL);
1768 	}
1769 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) {
1770 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
1771 		if (hw->phy.multispeed_fiber)
1772 			ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0,
1773 			    NULL);
1774 	} else if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_SX)
1775 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
1776 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_CX4)
1777 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
1778 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KR)
1779 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
1780 	if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_KX4)
1781 		ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
1782 	if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_KX)
1783 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
1784 	if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_KX)
1785 		ifmedia_add(&sc->media, IFM_ETHER | IFM_2500_KX, 0, NULL);
1786 
1787 	if (hw->device_id == IXGBE_DEV_ID_82598AT) {
1788 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0,
1789 		    NULL);
1790 		ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
1791 	}
1792 
1793 	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
1794 }
1795 
1796 void
1797 ixgbe_config_link(struct ix_softc *sc)
1798 {
1799 	uint32_t	autoneg, err = 0;
1800 	bool		negotiate;
1801 
1802 	if (ixgbe_is_sfp(&sc->hw)) {
1803 		if (sc->hw.phy.multispeed_fiber) {
1804 			sc->hw.mac.ops.setup_sfp(&sc->hw);
1805 			if (sc->hw.mac.ops.enable_tx_laser)
1806 				sc->hw.mac.ops.enable_tx_laser(&sc->hw);
1807 			ixgbe_handle_msf(sc);
1808 		} else
1809 			ixgbe_handle_mod(sc);
1810 	} else {
1811 		if (sc->hw.mac.ops.check_link)
1812 			err = sc->hw.mac.ops.check_link(&sc->hw, &autoneg,
1813 			    &sc->link_up, FALSE);
1814 		if (err)
1815 			return;
1816 		autoneg = sc->hw.phy.autoneg_advertised;
1817 		if ((!autoneg) && (sc->hw.mac.ops.get_link_capabilities))
1818 			err = sc->hw.mac.ops.get_link_capabilities(&sc->hw,
1819 			    &autoneg, &negotiate);
1820 		if (err)
1821 			return;
1822 		if (sc->hw.mac.ops.setup_link)
1823 			sc->hw.mac.ops.setup_link(&sc->hw,
1824 			    autoneg, sc->link_up);
1825 	}
1826 }
1827 
1828 /********************************************************************
1829  * Manage DMA'able memory.
1830  *******************************************************************/
1831 int
1832 ixgbe_dma_malloc(struct ix_softc *sc, bus_size_t size,
1833 		struct ixgbe_dma_alloc *dma, int mapflags)
1834 {
1835 	struct ifnet		*ifp = &sc->arpcom.ac_if;
1836 	struct ixgbe_osdep	*os = &sc->osdep;
1837 	int			 r;
1838 
1839 	dma->dma_tag = os->os_pa.pa_dmat;
1840 	r = bus_dmamap_create(dma->dma_tag, size, 1,
1841 	    size, 0, BUS_DMA_NOWAIT, &dma->dma_map);
1842 	if (r != 0) {
1843 		printf("%s: ixgbe_dma_malloc: bus_dmamap_create failed; "
1844 		       "error %u\n", ifp->if_xname, r);
1845 		goto fail_0;
1846 	}
1847 
1848 	r = bus_dmamem_alloc(dma->dma_tag, size, PAGE_SIZE, 0, &dma->dma_seg,
1849 	    1, &dma->dma_nseg, BUS_DMA_NOWAIT);
1850 	if (r != 0) {
1851 		printf("%s: ixgbe_dma_malloc: bus_dmamem_alloc failed; "
1852 		       "error %u\n", ifp->if_xname, r);
1853 		goto fail_1;
1854 	}
1855 
1856 	r = bus_dmamem_map(dma->dma_tag, &dma->dma_seg, dma->dma_nseg, size,
1857 	    &dma->dma_vaddr, BUS_DMA_NOWAIT);
1858 	if (r != 0) {
1859 		printf("%s: ixgbe_dma_malloc: bus_dmamem_map failed; "
1860 		       "error %u\n", ifp->if_xname, r);
1861 		goto fail_2;
1862 	}
1863 
1864 	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1865 	    size, NULL, mapflags | BUS_DMA_NOWAIT);
1866 	if (r != 0) {
1867 		printf("%s: ixgbe_dma_malloc: bus_dmamap_load failed; "
1868 		       "error %u\n", ifp->if_xname, r);
1869 		goto fail_3;
1870 	}
1871 
1872 	dma->dma_size = size;
1873 	return (0);
1874 fail_3:
1875 	bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, size);
1876 fail_2:
1877 	bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1878 fail_1:
1879 	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1880 fail_0:
1881 	dma->dma_map = NULL;
1882 	dma->dma_tag = NULL;
1883 	return (r);
1884 }
1885 
1886 void
1887 ixgbe_dma_free(struct ix_softc *sc, struct ixgbe_dma_alloc *dma)
1888 {
1889 	if (dma->dma_tag == NULL)
1890 		return;
1891 
1892 	if (dma->dma_map != NULL) {
1893 		bus_dmamap_sync(dma->dma_tag, dma->dma_map, 0,
1894 		    dma->dma_map->dm_mapsize,
1895 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1896 		bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1897 		bus_dmamem_unmap(dma->dma_tag, dma->dma_vaddr, dma->dma_size);
1898 		bus_dmamem_free(dma->dma_tag, &dma->dma_seg, dma->dma_nseg);
1899 		bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1900 		dma->dma_map = NULL;
1901 	}
1902 }
1903 
1904 
1905 /*********************************************************************
1906  *
1907  *  Allocate memory for the transmit and receive rings, and then
1908  *  the descriptors associated with each, called only once at attach.
1909  *
1910  **********************************************************************/
1911 int
1912 ixgbe_allocate_queues(struct ix_softc *sc)
1913 {
1914 	struct ifnet	*ifp = &sc->arpcom.ac_if;
1915 	struct ix_queue *que;
1916 	struct tx_ring *txr;
1917 	struct rx_ring *rxr;
1918 	int rsize, tsize;
1919 	int txconf = 0, rxconf = 0, i;
1920 
1921 	/* First allocate the top level queue structs */
1922 	if (!(sc->queues = mallocarray(sc->num_queues,
1923 	    sizeof(struct ix_queue), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1924 		printf("%s: Unable to allocate queue memory\n", ifp->if_xname);
1925 		goto fail;
1926 	}
1927 
1928 	/* Then allocate the TX ring struct memory */
1929 	if (!(sc->tx_rings = mallocarray(sc->num_queues,
1930 	    sizeof(struct tx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1931 		printf("%s: Unable to allocate TX ring memory\n", ifp->if_xname);
1932 		goto fail;
1933 	}
1934 
1935 	/* Next allocate the RX */
1936 	if (!(sc->rx_rings = mallocarray(sc->num_queues,
1937 	    sizeof(struct rx_ring), M_DEVBUF, M_NOWAIT | M_ZERO))) {
1938 		printf("%s: Unable to allocate RX ring memory\n", ifp->if_xname);
1939 		goto rx_fail;
1940 	}
1941 
1942 	/* Allocation size for the TX descriptor ring itself */
1943 	tsize = roundup2(sc->num_tx_desc *
1944 	    sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
1945 
1946 	/*
1947 	 * Now set up the TX queues. txconf tracks how many have been
1948 	 * allocated so that, if an allocation fails midway, the memory
1949 	 * already committed can be undone gracefully.
1950 	 */
1951 	for (i = 0; i < sc->num_queues; i++, txconf++) {
1952 		/* Set up some basics */
1953 		txr = &sc->tx_rings[i];
1954 		txr->sc = sc;
1955 		txr->me = i;
1956 
1957 		if (ixgbe_dma_malloc(sc, tsize,
1958 		    &txr->txdma, BUS_DMA_NOWAIT)) {
1959 			printf("%s: Unable to allocate TX Descriptor memory\n",
1960 			    ifp->if_xname);
1961 			goto err_tx_desc;
1962 		}
1963 		txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
1964 		bzero((void *)txr->tx_base, tsize);
1965 	}
1966 
1967 	/*
1968 	 * Next the RX queues...
1969 	 */
1970 	rsize = roundup2(sc->num_rx_desc *
1971 	    sizeof(union ixgbe_adv_rx_desc), 4096);
1972 	for (i = 0; i < sc->num_queues; i++, rxconf++) {
1973 		rxr = &sc->rx_rings[i];
1974 		/* Set up some basics */
1975 		rxr->sc = sc;
1976 		rxr->me = i;
1977 
1978 		if (ixgbe_dma_malloc(sc, rsize,
1979 			&rxr->rxdma, BUS_DMA_NOWAIT)) {
1980 			printf("%s: Unable to allocate RxDescriptor memory\n",
1981 			    ifp->if_xname);
1982 			goto err_rx_desc;
1983 		}
1984 		rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
1985 		bzero((void *)rxr->rx_base, rsize);
1986 	}
1987 
1988 	/*
1989 	 * Finally set up the queue holding structs
1990 	 */
1991 	for (i = 0; i < sc->num_queues; i++) {
1992 		que = &sc->queues[i];
1993 		que->sc = sc;
1994 		que->txr = &sc->tx_rings[i];
1995 		que->rxr = &sc->rx_rings[i];
1996 	}
1997 
1998 	return (0);
1999 
2000 err_rx_desc:
2001 	for (rxr = sc->rx_rings; rxconf > 0; rxr++, rxconf--)
2002 		ixgbe_dma_free(sc, &rxr->rxdma);
2003 err_tx_desc:
2004 	for (txr = sc->tx_rings; txconf > 0; txr++, txconf--)
2005 		ixgbe_dma_free(sc, &txr->txdma);
2006 	free(sc->rx_rings, M_DEVBUF, sc->num_queues * sizeof(struct rx_ring));
2007 	sc->rx_rings = NULL;
2008 rx_fail:
2009 	free(sc->tx_rings, M_DEVBUF, sc->num_queues * sizeof(struct tx_ring));
2010 	sc->tx_rings = NULL;
2011 fail:
2012 	return (ENOMEM);
2013 }
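
/*
 * Note on the unwind above: txconf and rxconf are only incremented after
 * a ring's descriptor area has been allocated, so if ixgbe_dma_malloc()
 * fails for ring i the err_*_desc loops free exactly rings 0..i-1 before
 * the top-level arrays themselves are released.
 */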
2014 
2015 /*********************************************************************
2016  *
2017  *  Allocate memory for tx_buffer structures. The tx_buffer stores all
2018  *  the information needed to transmit a packet on the wire. This is
2019  *  called only once at attach, setup is done every reset.
2020  *
2021  **********************************************************************/
2022 int
2023 ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
2024 {
2025 	struct ix_softc 	*sc = txr->sc;
2026 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2027 	struct ixgbe_tx_buf	*txbuf;
2028 	int			 error, i;
2029 
2030 	if (!(txr->tx_buffers = mallocarray(sc->num_tx_desc,
2031 	    sizeof(struct ixgbe_tx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2032 		printf("%s: Unable to allocate tx_buffer memory\n",
2033 		    ifp->if_xname);
2034 		error = ENOMEM;
2035 		goto fail;
2036 	}
2037 	txr->txtag = txr->txdma.dma_tag;
2038 
2039 	/* Create the descriptor buffer dma maps */
2040 	for (i = 0; i < sc->num_tx_desc; i++) {
2041 		txbuf = &txr->tx_buffers[i];
2042 		error = bus_dmamap_create(txr->txdma.dma_tag, IXGBE_TSO_SIZE,
2043 			    sc->num_segs, PAGE_SIZE, 0,
2044 			    BUS_DMA_NOWAIT, &txbuf->map);
2045 
2046 		if (error != 0) {
2047 			printf("%s: Unable to create TX DMA map\n",
2048 			    ifp->if_xname);
2049 			goto fail;
2050 		}
2051 	}
2052 
2053 	return 0;
2054 fail:
2055 	return (error);
2056 }
2057 
2058 /*********************************************************************
2059  *
2060  *  Initialize a transmit ring.
2061  *
2062  **********************************************************************/
2063 int
2064 ixgbe_setup_transmit_ring(struct tx_ring *txr)
2065 {
2066 	struct ix_softc		*sc = txr->sc;
2067 	int			 error;
2068 
2069 	/* Now allocate transmit buffers for the ring */
2070 	if ((error = ixgbe_allocate_transmit_buffers(txr)) != 0)
2071 		return (error);
2072 
2073 	/* Clear the old ring contents */
2074 	bzero((void *)txr->tx_base,
2075 	      (sizeof(union ixgbe_adv_tx_desc)) * sc->num_tx_desc);
2076 
2077 	/* Reset indices */
2078 	txr->next_avail_desc = 0;
2079 	txr->next_to_clean = 0;
2080 
2081 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2082 	    0, txr->txdma.dma_map->dm_mapsize,
2083 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2084 
2085 	return (0);
2086 }
2087 
2088 /*********************************************************************
2089  *
2090  *  Initialize all transmit rings.
2091  *
2092  **********************************************************************/
2093 int
2094 ixgbe_setup_transmit_structures(struct ix_softc *sc)
2095 {
2096 	struct tx_ring *txr = sc->tx_rings;
2097 	int		i, error;
2098 
2099 	for (i = 0; i < sc->num_queues; i++, txr++) {
2100 		if ((error = ixgbe_setup_transmit_ring(txr)) != 0)
2101 			goto fail;
2102 	}
2103 
2104 	return (0);
2105 fail:
2106 	ixgbe_free_transmit_structures(sc);
2107 	return (error);
2108 }
2109 
2110 /*********************************************************************
2111  *
2112  *  Enable transmit unit.
2113  *
2114  **********************************************************************/
2115 void
2116 ixgbe_initialize_transmit_units(struct ix_softc *sc)
2117 {
2118 	struct ifnet	*ifp = &sc->arpcom.ac_if;
2119 	struct tx_ring	*txr;
2120 	struct ixgbe_hw	*hw = &sc->hw;
2121 	int		 i;
2122 	uint64_t	 tdba;
2123 	uint32_t	 txctrl;
2124 
2125 	/* Setup the Base and Length of the Tx Descriptor Ring */
2126 
2127 	for (i = 0; i < sc->num_queues; i++) {
2128 		txr = &sc->tx_rings[i];
2129 
2130 		/* Setup descriptor base address */
2131 		tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
2132 		IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
2133 		       (tdba & 0x00000000ffffffffULL));
2134 		IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
2135 		IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
2136 		    sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
2137 
2138 		/* Setup the HW Tx Head and Tail descriptor pointers */
2139 		IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
2140 		IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
2141 
2142 		/* Setup Transmit Descriptor Cmd Settings */
2143 		txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
2144 		txr->queue_status = IXGBE_QUEUE_IDLE;
2145 		txr->watchdog_timer = 0;
2146 
2147 		/* Disable Head Writeback */
2148 		switch (hw->mac.type) {
2149 		case ixgbe_mac_82598EB:
2150 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
2151 			break;
2152 		case ixgbe_mac_82599EB:
2153 		case ixgbe_mac_X540:
2154 		default:
2155 			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
2156 			break;
2157 		}
2158 		txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2159 		switch (hw->mac.type) {
2160 		case ixgbe_mac_82598EB:
2161 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
2162 			break;
2163 		case ixgbe_mac_82599EB:
2164 		case ixgbe_mac_X540:
2165 		default:
2166 			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
2167 			break;
2168 		}
2169 	}
2170 	ifp->if_timer = 0;
2171 
2172 	if (hw->mac.type != ixgbe_mac_82598EB) {
2173 		uint32_t dmatxctl, rttdcs;
2174 		dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2175 		dmatxctl |= IXGBE_DMATXCTL_TE;
2176 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2177 		/* Disable arbiter to set MTQC */
2178 		rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2179 		rttdcs |= IXGBE_RTTDCS_ARBDIS;
2180 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2181 		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2182 		rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
2183 		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
2184 	}
2185 }
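
/*
 * Example of the base address split above (values are illustrative):
 * a TX ring at bus address 0x0000000123456000 is programmed as
 * TDBAL = 0x23456000 and TDBAH = 0x00000001; TDLEN is the ring size in
 * bytes, i.e. num_tx_desc * 16 since each descriptor is 16 bytes.
 */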
2186 
2187 /*********************************************************************
2188  *
2189  *  Free all transmit rings.
2190  *
2191  **********************************************************************/
2192 void
2193 ixgbe_free_transmit_structures(struct ix_softc *sc)
2194 {
2195 	struct tx_ring *txr = sc->tx_rings;
2196 	int		i;
2197 
2198 	for (i = 0; i < sc->num_queues; i++, txr++)
2199 		ixgbe_free_transmit_buffers(txr);
2200 }
2201 
2202 /*********************************************************************
2203  *
2204  *  Free transmit ring related data structures.
2205  *
2206  **********************************************************************/
2207 void
2208 ixgbe_free_transmit_buffers(struct tx_ring *txr)
2209 {
2210 	struct ix_softc *sc = txr->sc;
2211 	struct ixgbe_tx_buf *tx_buffer;
2212 	int             i;
2213 
2214 	INIT_DEBUGOUT("free_transmit_ring: begin");
2215 
2216 	if (txr->tx_buffers == NULL)
2217 		return;
2218 
2219 	tx_buffer = txr->tx_buffers;
2220 	for (i = 0; i < sc->num_tx_desc; i++, tx_buffer++) {
2221 		if (tx_buffer->map != NULL && tx_buffer->map->dm_nsegs > 0) {
2222 			bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2223 			    0, tx_buffer->map->dm_mapsize,
2224 			    BUS_DMASYNC_POSTWRITE);
2225 			bus_dmamap_unload(txr->txdma.dma_tag,
2226 			    tx_buffer->map);
2227 		}
2228 		if (tx_buffer->m_head != NULL) {
2229 			m_freem(tx_buffer->m_head);
2230 			tx_buffer->m_head = NULL;
2231 		}
2232 		if (tx_buffer->map != NULL) {
2233 			bus_dmamap_destroy(txr->txdma.dma_tag,
2234 			    tx_buffer->map);
2235 			tx_buffer->map = NULL;
2236 		}
2237 	}
2238 
2239 	if (txr->tx_buffers != NULL)
2240 		free(txr->tx_buffers, M_DEVBUF,
2241 		    sc->num_tx_desc * sizeof(struct ixgbe_tx_buf));
2242 	txr->tx_buffers = NULL;
2243 	txr->txtag = NULL;
2244 }
2245 
2246 /*********************************************************************
2247  *
2248  *  Advanced Context Descriptor setup for VLAN or CSUM
2249  *
2250  **********************************************************************/
2251 
2252 int
2253 ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp,
2254     uint32_t *cmd_type_len, uint32_t *olinfo_status)
2255 {
2256 	struct ixgbe_adv_tx_context_desc *TXD;
2257 	struct ixgbe_tx_buf *tx_buffer;
2258 #if NVLAN > 0
2259 	struct ether_vlan_header *eh;
2260 #else
2261 	struct ether_header *eh;
2262 #endif
2263 	struct ip *ip;
2264 #ifdef notyet
2265 	struct ip6_hdr *ip6;
2266 #endif
2267 	struct mbuf *m;
2268 	int	ipoff;
2269 	uint32_t vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2270 	int 	ehdrlen, ip_hlen = 0;
2271 	uint16_t etype;
2272 	uint8_t	ipproto = 0;
2273 	int	offload = TRUE;
2274 	int	ctxd = txr->next_avail_desc;
2275 #if NVLAN > 0
2276 	uint16_t vtag = 0;
2277 #endif
2278 
2279 #if notyet
2280 	/* First check if TSO is to be used */
2281 	if (mp->m_pkthdr.csum_flags & CSUM_TSO)
2282 		return (ixgbe_tso_setup(txr, mp, cmd_type_len, olinfo_status));
2283 #endif
2284 
2285 	if ((mp->m_pkthdr.csum_flags & (M_TCP_CSUM_OUT | M_UDP_CSUM_OUT)) == 0)
2286 		offload = FALSE;
2287 
2288 	/* Indicate the whole packet as payload when not doing TSO */
2289 	*olinfo_status |= mp->m_pkthdr.len << IXGBE_ADVTXD_PAYLEN_SHIFT;
2290 
2291 	/* Now ready a context descriptor */
2292 	TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
2293 	tx_buffer = &txr->tx_buffers[ctxd];
2294 
2295 	/*
2296 	 * In advanced descriptors the VLAN tag must be placed
2297 	 * into the descriptor itself, so we need a context
2298 	 * descriptor even when not doing offloads.
2299 	 */
2300 #if NVLAN > 0
2301 	if (mp->m_flags & M_VLANTAG) {
2302 		vtag = mp->m_pkthdr.ether_vtag;
2303 		vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
2304 	} else
2305 #endif
2306 	if (offload == FALSE)
2307 		return (0);	/* No need for CTX */
2308 
2309 	/*
2310 	 * Determine where frame payload starts.
2311 	 * Jump over vlan headers if already present,
2312 	 * helpful for QinQ too.
2313 	 */
2314 	if (mp->m_len < sizeof(struct ether_header))
2315 		return (-1);
2316 #if NVLAN > 0
2317 	eh = mtod(mp, struct ether_vlan_header *);
2318 	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
2319 		if (mp->m_len < sizeof(struct ether_vlan_header))
2320 			return (-1);
2321 		etype = ntohs(eh->evl_proto);
2322 		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
2323 	} else {
2324 		etype = ntohs(eh->evl_encap_proto);
2325 		ehdrlen = ETHER_HDR_LEN;
2326 	}
2327 #else
2328 	eh = mtod(mp, struct ether_header *);
2329 	etype = ntohs(eh->ether_type);
2330 	ehdrlen = ETHER_HDR_LEN;
2331 #endif
2332 
2333 	/* Set the ether header length */
2334 	vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
2335 
2336 	switch (etype) {
2337 	case ETHERTYPE_IP:
2338 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip))
2339 			return (-1);
2340 		m = m_getptr(mp, ehdrlen, &ipoff);
2341 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip));
2342 		ip = (struct ip *)(m->m_data + ipoff);
2343 		ip_hlen = ip->ip_hl << 2;
2344 		ipproto = ip->ip_p;
2345 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2346 		break;
2347 #ifdef notyet
2348 	case ETHERTYPE_IPV6:
2349 		if (mp->m_pkthdr.len < ehdrlen + sizeof(*ip6))
2350 			return (-1);
2351 		m = m_getptr(mp, ehdrlen, &ipoff);
2352 		KASSERT(m != NULL && m->m_len - ipoff >= sizeof(*ip6));
2353 		ip6 = (struct ip6_hdr *)(m->m_data + ipoff);
2354 		ip_hlen = sizeof(*ip6);
2355 		/* XXX-BZ this will go badly in case of ext hdrs. */
2356 		ipproto = ip6->ip6_nxt;
2357 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
2358 		break;
2359 #endif
2360 	default:
2361 		offload = FALSE;
2362 		break;
2363 	}
2364 
2365 	vlan_macip_lens |= ip_hlen;
2366 	type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
2367 
2368 	switch (ipproto) {
2369 	case IPPROTO_TCP:
2370 		if (mp->m_pkthdr.csum_flags & M_TCP_CSUM_OUT)
2371 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2372 		break;
2373 	case IPPROTO_UDP:
2374 		if (mp->m_pkthdr.csum_flags & M_UDP_CSUM_OUT)
2375 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
2376 		break;
2377 	default:
2378 		offload = FALSE;
2379 		break;
2380 	}
2381 
2382 	if (offload) /* For the TX descriptor setup */
2383 		*olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
2384 
2385 	/* Now copy bits into descriptor */
2386 	TXD->vlan_macip_lens = htole32(vlan_macip_lens);
2387 	TXD->type_tucmd_mlhl = htole32(type_tucmd_mlhl);
2388 	TXD->seqnum_seed = htole32(0);
2389 	TXD->mss_l4len_idx = htole32(0);
2390 
2391 	tx_buffer->m_head = NULL;
2392 	tx_buffer->eop_index = -1;
2393 
2394 	return (1);
2395 }
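
/*
 * Worked example for the packing above (illustrative values; the shift
 * names are the ixgbe_type.h definitions already used in the function).
 * An untagged TCP/IPv4 frame with a 14-byte Ethernet header and a
 * 20-byte IP header ends up with
 *
 *	vlan_macip_lens = (14 << IXGBE_ADVTXD_MACLEN_SHIFT) | 20;
 *	type_tucmd_mlhl = IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
 *	    IXGBE_ADVTXD_TUCMD_IPV4 | IXGBE_ADVTXD_TUCMD_L4T_TCP;
 *
 * The return value counts descriptors consumed: 1 if a context
 * descriptor was written, 0 if none was needed, -1 on a malformed packet.
 */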
2396 
2397 /**********************************************************************
2398  *
2399  *  Examine each tx_buffer in the used queue. If the hardware is done
2400  *  processing the packet then free associated resources. The
2401  *  tx_buffer is put back on the free queue.
2402  *
2403  **********************************************************************/
2404 int
2405 ixgbe_txeof(struct tx_ring *txr)
2406 {
2407 	struct ix_softc			*sc = txr->sc;
2408 	struct ifnet			*ifp = &sc->arpcom.ac_if;
2409 	unsigned int			 head, tail, last;
2410 	struct ixgbe_tx_buf		*tx_buffer;
2411 	struct ixgbe_legacy_tx_desc	*tx_desc;
2412 
2413 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2414 		return FALSE;
2415 
2416 	head = txr->next_avail_desc;
2417 	tail = txr->next_to_clean;
2418 
2419 	membar_consumer();
2420 
2421 	if (head == tail)
2422 		return (FALSE);
2423 
2424 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2425 	    0, txr->txdma.dma_map->dm_mapsize,
2426 	    BUS_DMASYNC_POSTREAD);
2427 
2428 	for (;;) {
2429 		tx_buffer = &txr->tx_buffers[tail];
2430 		last = tx_buffer->eop_index;
2431 		tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
2432 
2433 		if (!ISSET(tx_desc->upper.fields.status, IXGBE_TXD_STAT_DD))
2434 			break;
2435 
2436 		bus_dmamap_sync(txr->txdma.dma_tag, tx_buffer->map,
2437 		    0, tx_buffer->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2438 		bus_dmamap_unload(txr->txdma.dma_tag, tx_buffer->map);
2439 		m_freem(tx_buffer->m_head);
2440 
2441 		tx_buffer->m_head = NULL;
2442 		tx_buffer->eop_index = -1;
2443 
2444 		tail = last + 1;
2445 		if (tail == sc->num_tx_desc)
2446 			tail = 0;
2447 		if (head == tail) {
2448 			/* All clean, turn off the timer */
2449 			ifp->if_timer = 0;
2450 			break;
2451 		}
2452 	}
2453 
2454 	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
2455 	    0, txr->txdma.dma_map->dm_mapsize,
2456 	    BUS_DMASYNC_PREREAD);
2457 
2458 	membar_producer();
2459 
2460 	txr->next_to_clean = tail;
2461 
2462 	if (ifq_is_oactive(&ifp->if_snd))
2463 		ifq_restart(&ifp->if_snd);
2464 
2465 	return TRUE;
2466 }
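
/*
 * Descriptive note: next_avail_desc is the producer index and
 * next_to_clean the consumer index. Cleaning walks from the consumer
 * toward the producer and stops at the first packet whose EOP descriptor
 * does not yet have the DD (descriptor done) bit set by hardware.
 */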
2467 
2468 /*********************************************************************
2469  *
2470  *  Get a buffer from system mbuf buffer pool.
2471  *
2472  **********************************************************************/
2473 int
2474 ixgbe_get_buf(struct rx_ring *rxr, int i)
2475 {
2476 	struct ix_softc		*sc = rxr->sc;
2477 	struct ixgbe_rx_buf	*rxbuf;
2478 	struct mbuf		*mp;
2479 	int			error;
2480 	union ixgbe_adv_rx_desc	*rxdesc;
2481 
2482 	rxbuf = &rxr->rx_buffers[i];
2483 	rxdesc = &rxr->rx_base[i];
2484 	if (rxbuf->buf) {
2485 		printf("%s: ixgbe_get_buf: slot %d already has an mbuf\n",
2486 		    sc->dev.dv_xname, i);
2487 		return (ENOBUFS);
2488 	}
2489 
2490 	/* needed in any case, so preallocate since this is the allocation that can fail */
2491 	mp = MCLGETI(NULL, M_DONTWAIT, NULL, sc->rx_mbuf_sz);
2492 	if (!mp)
2493 		return (ENOBUFS);
2494 
2495 	mp->m_data += (mp->m_ext.ext_size - sc->rx_mbuf_sz);
2496 	mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
2497 
2498 	error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
2499 	    mp, BUS_DMA_NOWAIT);
2500 	if (error) {
2501 		m_freem(mp);
2502 		return (error);
2503 	}
2504 
2505 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2506 	    0, rxbuf->map->dm_mapsize, BUS_DMASYNC_PREREAD);
2507 	rxbuf->buf = mp;
2508 
2509 	rxdesc->read.pkt_addr = htole64(rxbuf->map->dm_segs[0].ds_addr);
2510 
2511 	return (0);
2512 }
2513 
2514 /*********************************************************************
2515  *
2516  *  Allocate memory for rx_buffer structures. Since we use one
2517  *  rx_buffer per received packet, the maximum number of rx_buffers
2518  *  that we'll need is equal to the number of receive descriptors
2519  *  that we've allocated.
2520  *
2521  **********************************************************************/
2522 int
2523 ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
2524 {
2525 	struct ix_softc		*sc = rxr->sc;
2526 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2527 	struct ixgbe_rx_buf 	*rxbuf;
2528 	int			i, error;
2529 
2530 	if (!(rxr->rx_buffers = mallocarray(sc->num_rx_desc,
2531 	    sizeof(struct ixgbe_rx_buf), M_DEVBUF, M_NOWAIT | M_ZERO))) {
2532 		printf("%s: Unable to allocate rx_buffer memory\n",
2533 		    ifp->if_xname);
2534 		error = ENOMEM;
2535 		goto fail;
2536 	}
2537 
2538 	rxbuf = rxr->rx_buffers;
2539 	for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
2540 		error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
2541 		    16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
2542 		if (error) {
2543 			printf("%s: Unable to create Pack DMA map\n",
2544 			    ifp->if_xname);
2545 			goto fail;
2546 		}
2547 	}
2548 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, 0,
2549 	    rxr->rxdma.dma_map->dm_mapsize,
2550 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2551 
2552 	return (0);
2553 
2554 fail:
2555 	return (error);
2556 }
2557 
2558 /*********************************************************************
2559  *
2560  *  Initialize a receive ring and its buffers.
2561  *
2562  **********************************************************************/
2563 int
2564 ixgbe_setup_receive_ring(struct rx_ring *rxr)
2565 {
2566 	struct ix_softc		*sc = rxr->sc;
2567 	struct ifnet		*ifp = &sc->arpcom.ac_if;
2568 	int			 rsize, error;
2569 
2570 	rsize = roundup2(sc->num_rx_desc *
2571 	    sizeof(union ixgbe_adv_rx_desc), 4096);
2572 	/* Clear the ring contents */
2573 	bzero((void *)rxr->rx_base, rsize);
2574 
2575 	if ((error = ixgbe_allocate_receive_buffers(rxr)) != 0)
2576 		return (error);
2577 
2578 	/* Setup our descriptor indices */
2579 	rxr->next_to_check = 0;
2580 	rxr->last_desc_filled = sc->num_rx_desc - 1;
2581 
2582 	if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
2583 	    sc->num_rx_desc - 1);
2584 
2585 	ixgbe_rxfill(rxr);
2586 	if (if_rxr_inuse(&rxr->rx_ring) == 0) {
2587 		printf("%s: unable to fill any rx descriptors\n",
2588 		    sc->dev.dv_xname);
2589 		return (ENOBUFS);
2590 	}
2591 
2592 	return (0);
2593 }
2594 
2595 int
2596 ixgbe_rxfill(struct rx_ring *rxr)
2597 {
2598 	struct ix_softc *sc = rxr->sc;
2599 	int		 post = 0;
2600 	u_int		 slots;
2601 	int		 i;
2602 
2603 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2604 	    0, rxr->rxdma.dma_map->dm_mapsize,
2605 	    BUS_DMASYNC_POSTWRITE);
2606 
2607 	i = rxr->last_desc_filled;
2608 	for (slots = if_rxr_get(&rxr->rx_ring, sc->num_rx_desc);
2609 	    slots > 0; slots--) {
2610 		if (++i == sc->num_rx_desc)
2611 			i = 0;
2612 
2613 		if (ixgbe_get_buf(rxr, i) != 0)
2614 			break;
2615 
2616 		rxr->last_desc_filled = i;
2617 		post = 1;
2618 	}
2619 
2620 	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2621 	    0, rxr->rxdma.dma_map->dm_mapsize,
2622 	    BUS_DMASYNC_PREWRITE);
2623 
2624 	if_rxr_put(&rxr->rx_ring, slots);
2625 
2626 	return (post);
2627 }
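
/*
 * Descriptive note: if_rxr_get() reserves up to num_rx_desc slots from
 * the ring accounting, ixgbe_get_buf() consumes one slot per descriptor
 * posted, and if_rxr_put() hands back whatever was not used. A non-zero
 * return tells the caller (see ixgbe_rxrefill()) to advance the RDT tail
 * register to last_desc_filled.
 */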
2628 
2629 void
2630 ixgbe_rxrefill(void *xsc)
2631 {
2632 	struct ix_softc *sc = xsc;
2633 	struct ix_queue *que = sc->queues;
2634 	int s;
2635 
2636 	s = splnet();
2637 	if (ixgbe_rxfill(que->rxr)) {
2638 		/* Advance the Rx Queue "Tail Pointer" */
2639 		IXGBE_WRITE_REG(&sc->hw, IXGBE_RDT(que->rxr->me),
2640 		    que->rxr->last_desc_filled);
2641 	} else
2642 		timeout_add(&sc->rx_refill, 1);
2643 	splx(s);
2644 }
2645 
2646 /*********************************************************************
2647  *
2648  *  Initialize all receive rings.
2649  *
2650  **********************************************************************/
2651 int
2652 ixgbe_setup_receive_structures(struct ix_softc *sc)
2653 {
2654 	struct rx_ring *rxr = sc->rx_rings;
2655 	int i;
2656 
2657 	for (i = 0; i < sc->num_queues; i++, rxr++)
2658 		if (ixgbe_setup_receive_ring(rxr))
2659 			goto fail;
2660 
2661 	return (0);
2662 fail:
2663 	ixgbe_free_receive_structures(sc);
2664 	return (ENOBUFS);
2665 }
2666 
2667 /*********************************************************************
2668  *
2669  *  Setup receive registers and features.
2670  *
2671  **********************************************************************/
2672 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
2673 
2674 void
2675 ixgbe_initialize_receive_units(struct ix_softc *sc)
2676 {
2677 	struct rx_ring	*rxr = sc->rx_rings;
2678 	struct ixgbe_hw	*hw = &sc->hw;
2679 	uint32_t	bufsz, fctrl, srrctl, rxcsum;
2680 	uint32_t	hlreg;
2681 	int		i;
2682 
2683 	/*
2684 	 * Make sure receives are disabled while
2685 	 * setting up the descriptor ring
2686 	 */
2687 	ixgbe_disable_rx(hw);
2688 
2689 	/* Enable broadcasts */
2690 	fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2691 	fctrl |= IXGBE_FCTRL_BAM;
2692 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
2693 		fctrl |= IXGBE_FCTRL_DPF;
2694 		fctrl |= IXGBE_FCTRL_PMCF;
2695 	}
2696 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2697 
2698 	/* Always enable jumbo frame reception */
2699 	hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2700 	hlreg |= IXGBE_HLREG0_JUMBOEN;
2701 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
2702 
2703 	bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2704 
2705 	for (i = 0; i < sc->num_queues; i++, rxr++) {
2706 		uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
2707 
2708 		/* Setup the Base and Length of the Rx Descriptor Ring */
2709 		IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
2710 			       (rdba & 0x00000000ffffffffULL));
2711 		IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
2712 		IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
2713 		    sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
2714 
2715 		/* Set up the SRRCTL register */
2716 		srrctl = bufsz | IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2717 		IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
2718 
2719 		/* Setup the HW Rx Head and Tail Descriptor Pointers */
2720 		IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
2721 		IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
2722 	}
2723 
2724 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
2725 		uint32_t psrtype = IXGBE_PSRTYPE_TCPHDR |
2726 			      IXGBE_PSRTYPE_UDPHDR |
2727 			      IXGBE_PSRTYPE_IPV4HDR |
2728 			      IXGBE_PSRTYPE_IPV6HDR;
2729 		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2730 	}
2731 
2732 	rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2733 	rxcsum &= ~IXGBE_RXCSUM_PCSD;
2734 
2735 	ixgbe_initialize_rss_mapping(sc);
2736 
2737 	/* Setup RSS */
2738 	if (sc->num_queues > 1) {
2739 		/* RSS and RX IPP Checksum are mutually exclusive */
2740 		rxcsum |= IXGBE_RXCSUM_PCSD;
2741 	}
2742 
2743 	/* This is useful for calculating UDP/IP fragment checksums */
2744 	if (!(rxcsum & IXGBE_RXCSUM_PCSD))
2745 		rxcsum |= IXGBE_RXCSUM_IPPCSE;
2746 
2747 	IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2748 }
2749 
2750 void
2751 ixgbe_initialize_rss_mapping(struct ix_softc *sc)
2752 {
2753 	struct ixgbe_hw	*hw = &sc->hw;
2754 	uint32_t reta = 0, mrqc, rss_key[10];
2755 	int i, j, queue_id, table_size, index_mult;
2756 
2757 	/* set up random bits */
2758 	arc4random_buf(&rss_key, sizeof(rss_key));
2759 
2760 	/* Set multiplier for RETA setup and table size based on MAC */
2761 	index_mult = 0x1;
2762 	table_size = 128;
2763 	switch (sc->hw.mac.type) {
2764 	case ixgbe_mac_82598EB:
2765 		index_mult = 0x11;
2766 		break;
2767 	case ixgbe_mac_X550:
2768 	case ixgbe_mac_X550EM_x:
2769 	case ixgbe_mac_X550EM_a:
2770 		table_size = 512;
2771 		break;
2772 	default:
2773 		break;
2774 	}
2775 
2776 	/* Set up the redirection table */
2777 	for (i = 0, j = 0; i < table_size; i++, j++) {
2778 		if (j == sc->num_queues) j = 0;
2779 		queue_id = (j * index_mult);
2780 		/*
2781 		 * The low 8 bits are for hash value (n+0);
2782 		 * The next 8 bits are for hash value (n+1), etc.
2783 		 */
2784 		reta >>= 8;
2785 		reta |= ((uint32_t)queue_id) << 24;
2786 		if ((i & 3) == 3) {
2787 			if (i < 128)
2788 				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2789 			else
2790 				IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
2791 				    reta);
2792 			reta = 0;
2793 		}
2794 	}
2795 
2796 	/* Now fill our hash function seeds */
2797 	for (i = 0; i < 10; i++)
2798 		IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), rss_key[i]);
2799 
2800 	/*
2801 	 * Disable UDP hashing: IP fragments aren't currently being
2802 	 * handled, so hashing UDP would give a mix of 2-tuple and
2803 	 * 4-tuple traffic.
2804 	 */
2805 	mrqc = IXGBE_MRQC_RSSEN
2806 	     | IXGBE_MRQC_RSS_FIELD_IPV4
2807 	     | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2808 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
2809 	     | IXGBE_MRQC_RSS_FIELD_IPV6_EX
2810 	     | IXGBE_MRQC_RSS_FIELD_IPV6
2811 	     | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2812 	;
2813 	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2814 }
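
/*
 * Worked example of the RETA packing above (illustrative): each 32-bit
 * RETA register holds four 8-bit queue indices, filled low byte first by
 * the shift-then-or-into-the-top-byte trick. With two queues and
 * index_mult == 1 the entries cycle 0,1,0,1,... so every register is
 * written as 0x01000100.
 */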
2815 
2816 /*********************************************************************
2817  *
2818  *  Free all receive rings.
2819  *
2820  **********************************************************************/
2821 void
2822 ixgbe_free_receive_structures(struct ix_softc *sc)
2823 {
2824 	struct rx_ring *rxr;
2825 	int		i;
2826 
2827 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2828 		if_rxr_init(&rxr->rx_ring, 0, 0);
2829 
2830 	for (i = 0, rxr = sc->rx_rings; i < sc->num_queues; i++, rxr++)
2831 		ixgbe_free_receive_buffers(rxr);
2832 }
2833 
2834 /*********************************************************************
2835  *
2836  *  Free receive ring data structures
2837  *
2838  **********************************************************************/
2839 void
2840 ixgbe_free_receive_buffers(struct rx_ring *rxr)
2841 {
2842 	struct ix_softc		*sc;
2843 	struct ixgbe_rx_buf	*rxbuf;
2844 	int			 i;
2845 
2846 	sc = rxr->sc;
2847 	if (rxr->rx_buffers != NULL) {
2848 		for (i = 0; i < sc->num_rx_desc; i++) {
2849 			rxbuf = &rxr->rx_buffers[i];
2850 			if (rxbuf->buf != NULL) {
2851 				bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map,
2852 				    0, rxbuf->map->dm_mapsize,
2853 				    BUS_DMASYNC_POSTREAD);
2854 				bus_dmamap_unload(rxr->rxdma.dma_tag,
2855 				    rxbuf->map);
2856 				m_freem(rxbuf->buf);
2857 				rxbuf->buf = NULL;
2858 			}
2859 			bus_dmamap_destroy(rxr->rxdma.dma_tag, rxbuf->map);
2860 			rxbuf->map = NULL;
2861 		}
2862 		free(rxr->rx_buffers, M_DEVBUF,
2863 		    sc->num_rx_desc * sizeof(struct ixgbe_rx_buf));
2864 		rxr->rx_buffers = NULL;
2865 	}
2866 }
2867 
2868 /*********************************************************************
2869  *
2870  *  This routine executes in interrupt context. It replenishes
2871  *  the mbufs in the descriptor ring and passes data that has been
2872  *  DMA'd into host memory up to the network stack.
2873  *
2874  *********************************************************************/
2875 int
2876 ixgbe_rxeof(struct ix_queue *que)
2877 {
2878 	struct ix_softc 	*sc = que->sc;
2879 	struct rx_ring		*rxr = que->rxr;
2880 	struct ifnet   		*ifp = &sc->arpcom.ac_if;
2881 	struct mbuf_list	 ml = MBUF_LIST_INITIALIZER();
2882 	struct mbuf    		*mp, *sendmp;
2883 	uint8_t		    	 eop = 0;
2884 	uint16_t		 len, vtag;
2885 	uint32_t		 staterr = 0, ptype;
2886 	struct ixgbe_rx_buf	*rxbuf, *nxbuf;
2887 	union ixgbe_adv_rx_desc	*rxdesc;
2888 	size_t			 dsize = sizeof(union ixgbe_adv_rx_desc);
2889 	int			 i, nextp;
2890 
2891 	if (!ISSET(ifp->if_flags, IFF_RUNNING))
2892 		return FALSE;
2893 
2894 	i = rxr->next_to_check;
2895 	while (if_rxr_inuse(&rxr->rx_ring) > 0) {
2896 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2897 		    dsize * i, dsize, BUS_DMASYNC_POSTREAD);
2898 
2899 		rxdesc = &rxr->rx_base[i];
2900 		staterr = letoh32(rxdesc->wb.upper.status_error);
2901 		if (!ISSET(staterr, IXGBE_RXD_STAT_DD)) {
2902 			bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
2903 			    dsize * i, dsize,
2904 			    BUS_DMASYNC_PREREAD);
2905 			break;
2906 		}
2907 
2908 		/* Zero out the receive descriptor's status */
2909 		rxdesc->wb.upper.status_error = 0;
2910 		rxbuf = &rxr->rx_buffers[i];
2911 
2912 		/* pull the mbuf off the ring */
2913 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxbuf->map, 0,
2914 		    rxbuf->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
2915 		bus_dmamap_unload(rxr->rxdma.dma_tag, rxbuf->map);
2916 
2917 		mp = rxbuf->buf;
2918 		len = letoh16(rxdesc->wb.upper.length);
2919 		ptype = letoh32(rxdesc->wb.lower.lo_dword.data) &
2920 		    IXGBE_RXDADV_PKTTYPE_MASK;
2921 		vtag = letoh16(rxdesc->wb.upper.vlan);
2922 		eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
2923 
2924 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
2925 			sc->dropped_pkts++;
2926 
2927 			if (rxbuf->fmp) {
2928 				m_freem(rxbuf->fmp);
2929 				rxbuf->fmp = NULL;
2930 			}
2931 
2932 			m_freem(mp);
2933 			rxbuf->buf = NULL;
2934 			goto next_desc;
2935 		}
2936 
2937 		if (mp == NULL) {
2938 			panic("%s: ixgbe_rxeof: NULL mbuf in slot %d "
2939 			    "(nrx %d, filled %d)", sc->dev.dv_xname,
2940 			    i, if_rxr_inuse(&rxr->rx_ring),
2941 			    rxr->last_desc_filled);
2942 		}
2943 
2944 		/* Currently no HW RSC support for the 82599 */
2945 		if (!eop) {
2946 			/*
2947 			 * Figure out the next descriptor of this frame.
2948 			 */
2949 			nextp = i + 1;
2950 			if (nextp == sc->num_rx_desc)
2951 				nextp = 0;
2952 			nxbuf = &rxr->rx_buffers[nextp];
2953 			/* prefetch(nxbuf); */
2954 		}
2955 
2956 		/*
2957 		 * Rather than using the fmp/lmp global pointers
2958 		 * we now keep the head of a packet chain in the
2959 		 * buffer struct and pass this along from one
2960 		 * descriptor to the next, until we get EOP.
2961 		 */
2962 		mp->m_len = len;
2963 		/*
2964 		 * See if there is a stored head from an earlier descriptor;
2965 		 * it determines whether this buffer continues a frame.
2966 		 */
2967 		sendmp = rxbuf->fmp;
2968 		rxbuf->buf = rxbuf->fmp = NULL;
2969 
2970 		if (sendmp != NULL) /* secondary frag */
2971 			sendmp->m_pkthdr.len += mp->m_len;
2972 		else {
2973 			/* first desc of a non-ps chain */
2974 			sendmp = mp;
2975 			sendmp->m_pkthdr.len = mp->m_len;
2976 #if NVLAN > 0
2977 			if (staterr & IXGBE_RXD_STAT_VP) {
2978 				sendmp->m_pkthdr.ether_vtag = vtag;
2979 				sendmp->m_flags |= M_VLANTAG;
2980 			}
2981 #endif
2982 		}
2983 
2984 		/* Pass the head pointer on */
2985 		if (eop == 0) {
2986 			nxbuf->fmp = sendmp;
2987 			sendmp = NULL;
2988 			mp->m_next = nxbuf->buf;
2989 		} else { /* Sending this frame? */
2990 			rxr->rx_packets++;
2991 			/* capture data for AIM */
2992 			rxr->bytes += sendmp->m_pkthdr.len;
2993 			rxr->rx_bytes += sendmp->m_pkthdr.len;
2994 
2995 			ixgbe_rx_checksum(staterr, sendmp, ptype);
2996 
2997 			ml_enqueue(&ml, sendmp);
2998 		}
2999 next_desc:
3000 		if_rxr_put(&rxr->rx_ring, 1);
3001 		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
3002 		    dsize * i, dsize,
3003 		    BUS_DMASYNC_PREREAD);
3004 
3005 		/* Advance our pointers to the next descriptor. */
3006 		if (++i == sc->num_rx_desc)
3007 			i = 0;
3008 	}
3009 	rxr->next_to_check = i;
3010 
3011 	if_input(ifp, &ml);
3012 
3013 	if (!(staterr & IXGBE_RXD_STAT_DD))
3014 		return FALSE;
3015 
3016 	return TRUE;
3017 }
3018 
3019 /*********************************************************************
3020  *
3021  *  Verify that the hardware indicated that the checksum is valid.
3022  *  Inform the stack of the checksum status so that the stack
3023  *  doesn't spend time re-verifying it.
3024  *
3025  *********************************************************************/
3026 void
3027 ixgbe_rx_checksum(uint32_t staterr, struct mbuf * mp, uint32_t ptype)
3028 {
3029 	uint16_t status = (uint16_t) staterr;
3030 	uint8_t  errors = (uint8_t) (staterr >> 24);
3031 
3032 	if (status & IXGBE_RXD_STAT_IPCS) {
3033 		if (!(errors & IXGBE_RXD_ERR_IPE)) {
3034 			/* IP Checksum Good */
3035 			mp->m_pkthdr.csum_flags = M_IPV4_CSUM_IN_OK;
3036 		} else
3037 			mp->m_pkthdr.csum_flags = 0;
3038 	}
3039 	if (status & IXGBE_RXD_STAT_L4CS) {
3040 		if (!(errors & IXGBE_RXD_ERR_TCPE))
3041 			mp->m_pkthdr.csum_flags |=
3042 				M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3043 	}
3044 }
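
/*
 * Descriptive note: IXGBE_RXD_STAT_L4CS only says a layer 4 checksum was
 * checked; this code does not look at the packet type here, so it sets
 * both M_TCP_CSUM_IN_OK and M_UDP_CSUM_IN_OK and the stack consumes
 * whichever flag matches the protocol it parses.
 */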
3045 
3046 void
3047 ixgbe_setup_vlan_hw_support(struct ix_softc *sc)
3048 {
3049 	uint32_t	ctrl;
3050 	int		i;
3051 
3052 	/*
3053 	 * A soft reset zeroes out the VFTA, so
3054 	 * we need to repopulate it now.
3055 	 */
3056 	for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
3057 		if (sc->shadow_vfta[i] != 0)
3058 			IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTA(i),
3059 			    sc->shadow_vfta[i]);
3060 	}
3061 
3062 	ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_VLNCTRL);
3063 #if 0
3064 	/* Enable the Filter Table if enabled */
3065 	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
3066 		ctrl &= ~IXGBE_VLNCTRL_CFIEN;
3067 		ctrl |= IXGBE_VLNCTRL_VFE;
3068 	}
3069 #endif
3070 	if (sc->hw.mac.type == ixgbe_mac_82598EB)
3071 		ctrl |= IXGBE_VLNCTRL_VME;
3072 	IXGBE_WRITE_REG(&sc->hw, IXGBE_VLNCTRL, ctrl);
3073 
3074 	/* On 82599 the VLAN enable is per-queue in RXDCTL */
3075 	if (sc->hw.mac.type != ixgbe_mac_82598EB) {
3076 		for (i = 0; i < sc->num_queues; i++) {
3077 			ctrl = IXGBE_READ_REG(&sc->hw, IXGBE_RXDCTL(i));
3078 			ctrl |= IXGBE_RXDCTL_VME;
3079 			IXGBE_WRITE_REG(&sc->hw, IXGBE_RXDCTL(i), ctrl);
3080 		}
3081 	}
3082 }
3083 
3084 void
3085 ixgbe_enable_intr(struct ix_softc *sc)
3086 {
3087 	struct ixgbe_hw *hw = &sc->hw;
3088 	struct ix_queue *que = sc->queues;
3089 	uint32_t	mask, fwsm;
3090 	int i;
3091 
3092 	mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
3093 	/* Enable Fan Failure detection */
3094 	if (hw->device_id == IXGBE_DEV_ID_82598AT)
3095 		mask |= IXGBE_EIMS_GPI_SDP1;
3096 
3097 	switch (sc->hw.mac.type) {
3098 	case ixgbe_mac_82599EB:
3099 		mask |= IXGBE_EIMS_ECC;
3100 		/* Temperature sensor on some adapters */
3101 		mask |= IXGBE_EIMS_GPI_SDP0;
3102 		/* SFP+ (RX_LOS_N & MOD_ABS_N) */
3103 		mask |= IXGBE_EIMS_GPI_SDP1;
3104 		mask |= IXGBE_EIMS_GPI_SDP2;
3105 		break;
3106 	case ixgbe_mac_X540:
3107 		mask |= IXGBE_EIMS_ECC;
3108 		/* Detect if Thermal Sensor is enabled */
3109 		fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
3110 		if (fwsm & IXGBE_FWSM_TS_ENABLED)
3111 			mask |= IXGBE_EIMS_TS;
3112 		break;
3113 	case ixgbe_mac_X550:
3114 	case ixgbe_mac_X550EM_x:
3115 	case ixgbe_mac_X550EM_a:
3116 		mask |= IXGBE_EIMS_ECC;
3117 		/* MAC thermal sensor is automatically enabled */
3118 		mask |= IXGBE_EIMS_TS;
3119 		/* Some devices use SDP0 for important information */
3120 		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
3121 		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T)
3122 			mask |= IXGBE_EIMS_GPI_SDP0_X540;
3123 	default:
3124 		break;
3125 	}
3126 
3127 	IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
3128 
3129 	/* With MSI-X we use auto clear */
3130 	if (sc->msix > 1) {
3131 		mask = IXGBE_EIMS_ENABLE_MASK;
3132 		/* Don't autoclear Link */
3133 		mask &= ~IXGBE_EIMS_OTHER;
3134 		mask &= ~IXGBE_EIMS_LSC;
3135 		IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
3136 	}
3137 
3138 	/*
3139 	 * Now enable all queues. This is done separately to
3140 	 * allow for handling the extended (beyond 32) MSI-X
3141 	 * vectors that can be used by the 82599.
3142 	 */
3143 	for (i = 0; i < sc->num_queues; i++, que++)
3144 		ixgbe_enable_queue(sc, que->msix);
3145 
3146 	IXGBE_WRITE_FLUSH(hw);
3147 }
3148 
3149 void
3150 ixgbe_disable_intr(struct ix_softc *sc)
3151 {
3152 	if (sc->msix > 1)
3153 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIAC, 0);
3154 	if (sc->hw.mac.type == ixgbe_mac_82598EB) {
3155 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, ~0);
3156 	} else {
3157 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC, 0xFFFF0000);
3158 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(0), ~0);
3159 		IXGBE_WRITE_REG(&sc->hw, IXGBE_EIMC_EX(1), ~0);
3160 	}
3161 	IXGBE_WRITE_FLUSH(&sc->hw);
3162 }
3163 
3164 uint16_t
3165 ixgbe_read_pci_cfg(struct ixgbe_hw *hw, uint32_t reg)
3166 {
3167 	struct pci_attach_args	*pa;
3168 	uint32_t value;
3169 	int high = 0;
3170 
3171 	if (reg & 0x2) {
3172 		high = 1;
3173 		reg &= ~0x2;
3174 	}
3175 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3176 	value = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3177 
3178 	if (high)
3179 		value >>= 16;
3180 
3181 	return (value & 0xffff);
3182 }
3183 
3184 void
3185 ixgbe_write_pci_cfg(struct ixgbe_hw *hw, uint32_t reg, uint16_t value)
3186 {
3187 	struct pci_attach_args	*pa;
3188 	uint32_t rv;
3189 	int high = 0;
3190 
3191 	/* Need to do read/mask/write... because 16 vs 32 bit!!! */
3192 	if (reg & 0x2) {
3193 		high = 1;
3194 		reg &= ~0x2;
3195 	}
3196 	pa = &((struct ixgbe_osdep *)hw->back)->os_pa;
3197 	rv = pci_conf_read(pa->pa_pc, pa->pa_tag, reg);
3198 	if (!high)
3199 		rv = (rv & 0xffff0000) | value;
3200 	else
3201 		rv = (rv & 0xffff) | ((uint32_t)value << 16);
3202 	pci_conf_write(pa->pa_pc, pa->pa_tag, reg, rv);
3203 }
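
/*
 * Worked example (illustrative): writing the 16-bit value 0x1234 to
 * config offset 0xa6 reads the 32-bit dword at 0xa4, keeps its low half,
 * merges 0x1234 into bits 31:16 and writes the dword back; an offset of
 * 0xa4 would instead replace bits 15:0.
 */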
3204 
3205 /*
3206  * Setup the correct IVAR register for a particular MSIX interrupt
3207  *   (yes this is all very magic and confusing :)
3208  *  - entry is the register array entry
3209  *  - vector is the MSIX vector for this queue
3210  *  - type is RX/TX/MISC
3211  */
3212 void
3213 ixgbe_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
3214 {
3215 	struct ixgbe_hw *hw = &sc->hw;
3216 	uint32_t ivar, index;
3217 
3218 	vector |= IXGBE_IVAR_ALLOC_VAL;
3219 
3220 	switch (hw->mac.type) {
3221 
3222 	case ixgbe_mac_82598EB:
3223 		if (type == -1)
3224 			entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
3225 		else
3226 			entry += (type * 64);
3227 		index = (entry >> 2) & 0x1F;
3228 		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
3229 		ivar &= ~(0xFF << (8 * (entry & 0x3)));
3230 		ivar |= (vector << (8 * (entry & 0x3)));
3231 		IXGBE_WRITE_REG(&sc->hw, IXGBE_IVAR(index), ivar);
3232 		break;
3233 
3234 	case ixgbe_mac_82599EB:
3235 	case ixgbe_mac_X540:
3236 	case ixgbe_mac_X550:
3237 	case ixgbe_mac_X550EM_x:
3238 	case ixgbe_mac_X550EM_a:
3239 		if (type == -1) { /* MISC IVAR */
3240 			index = (entry & 1) * 8;
3241 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
3242 			ivar &= ~(0xFF << index);
3243 			ivar |= (vector << index);
3244 			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
3245 		} else {	/* RX/TX IVARS */
3246 			index = (16 * (entry & 1)) + (8 * type);
3247 			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
3248 			ivar &= ~(0xFF << index);
3249 			ivar |= (vector << index);
3250 			IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
3251 		}
3252 		break;
3253 	default:
3254 		break;
3255 	}
3256 }
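
/*
 * Worked example for the 82599-class layout above (illustrative, and
 * assuming IXGBE_IVAR_ALLOC_VAL is 0x80): mapping the RX side (type 0)
 * of queue 5 to MSI-X vector 5 gives index = 16 * (5 & 1) + 8 * 0 = 16,
 * so byte 2 (bits 23:16) of IVAR(5 >> 1) = IVAR(2) is set to 0x85.
 */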
3257 
3258 void
3259 ixgbe_configure_ivars(struct ix_softc *sc)
3260 {
3261 #if notyet
3262 	struct ix_queue *que = sc->queues;
3263 	uint32_t newitr;
3264 	int i;
3265 
3266 	if (ixgbe_max_interrupt_rate > 0)
3267 		newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
3268 	else
3269 		newitr = 0;
3270 
3271 	for (i = 0; i < sc->num_queues; i++, que++) {
3272 		/* First the RX queue entry */
3273 		ixgbe_set_ivar(sc, i, que->msix, 0);
3274 		/* ... and the TX */
3275 		ixgbe_set_ivar(sc, i, que->msix, 1);
3276 		/* Set an Initial EITR value */
3277 		IXGBE_WRITE_REG(&sc->hw,
3278 		    IXGBE_EITR(que->msix), newitr);
3279 	}
3280 
3281 	/* For the Link interrupt */
3282 	ixgbe_set_ivar(sc, 1, sc->linkvec, -1);
3283 #endif
3284 }
3285 
3286 /*
3287  * SFP module interrupts handler
3288  */
3289 void
3290 ixgbe_handle_mod(struct ix_softc *sc)
3291 {
3292 	struct ixgbe_hw *hw = &sc->hw;
3293 	uint32_t err;
3294 
3295 	err = hw->phy.ops.identify_sfp(hw);
3296 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3297 		printf("%s: Unsupported SFP+ module type was detected!\n",
3298 		    sc->dev.dv_xname);
3299 		return;
3300 	}
3301 	err = hw->mac.ops.setup_sfp(hw);
3302 	if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3303 		printf("%s: Setup failure - unsupported SFP+ module type!\n",
3304 		    sc->dev.dv_xname);
3305 		return;
3306 	}
3307 
3308 	ixgbe_handle_msf(sc);
3309 }
3310 
3311 
3312 /*
3313  * MSF (multispeed fiber) interrupts handler
3314  */
3315 void
3316 ixgbe_handle_msf(struct ix_softc *sc)
3317 {
3318 	struct ixgbe_hw *hw = &sc->hw;
3319 	uint32_t autoneg;
3320 	bool negotiate;
3321 
3322 	autoneg = hw->phy.autoneg_advertised;
3323 	if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) {
3324 		if (hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate))
3325 			return;
3326 	}
3327 	if (hw->mac.ops.setup_link)
3328 		hw->mac.ops.setup_link(hw, autoneg, TRUE);
3329 
3330 	ifmedia_delete_instance(&sc->media, IFM_INST_ANY);
3331 	ixgbe_add_media_types(sc);
3332 	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
3333 }
3334 
3335 /*
3336  * External PHY interrupts handler
3337  */
3338 void
3339 ixgbe_handle_phy(struct ix_softc *sc)
3340 {
3341 	struct ixgbe_hw *hw = &sc->hw;
3342 	int error;
3343 
3344 	error = hw->phy.ops.handle_lasi(hw);
3345 	if (error == IXGBE_ERR_OVERTEMP)
3346 		printf("%s: CRITICAL: EXTERNAL PHY OVER TEMP!! "
3347 		    " PHY will downshift to lower power state!\n",
3348 		    sc->dev.dv_xname);
3349 	else if (error)
3350 		printf("%s: Error handling LASI interrupt: %d\n",
3351 		    sc->dev.dv_xname, error);
3352 
3353 }
3354 
3355 /**********************************************************************
3356  *
3357  *  Update the board statistics counters.
3358  *
3359  **********************************************************************/
3360 void
3361 ixgbe_update_stats_counters(struct ix_softc *sc)
3362 {
3363 	struct ifnet	*ifp = &sc->arpcom.ac_if;
3364 	struct ixgbe_hw	*hw = &sc->hw;
3365 	uint64_t	total_missed_rx = 0;
3366 #ifdef IX_DEBUG
3367 	uint32_t	missed_rx = 0, bprc, lxon, lxoff, total;
3368 	int		i;
3369 #endif
3370 
3371 	sc->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
3372 	sc->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3373 
3374 #ifdef IX_DEBUG
3375 	for (i = 0; i < 8; i++) {
3376 		uint32_t mp;
3377 		mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3378 		/* missed_rx tallies misses for the gprc workaround */
3379 		missed_rx += mp;
3380 		/* global total per queue */
3381 		sc->stats.mpc[i] += mp;
3382 		/* running comprehensive total for stats display */
3383 		total_missed_rx += sc->stats.mpc[i];
3384 		if (hw->mac.type == ixgbe_mac_82598EB)
3385 			sc->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
3386 	}
3387 
3388 	/* Hardware workaround, gprc counts missed packets */
3389 	sc->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3390 	sc->stats.gprc -= missed_rx;
3391 
3392 	if (hw->mac.type != ixgbe_mac_82598EB) {
3393 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
3394 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
3395 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
3396 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
3397 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
3398 		    ((uint64_t)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
3399 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
3400 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
3401 	} else {
3402 		sc->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
3403 		sc->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
3404 		/* 82598 only has a counter in the high register */
3405 		sc->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
3406 		sc->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3407 		sc->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
3408 	}
3409 
3410 	/*
3411 	 * Workaround: mprc hardware is incorrectly counting
3412 	 * broadcasts, so for now we subtract those.
3413 	 */
3414 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3415 	sc->stats.bprc += bprc;
3416 	sc->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3417 	if (hw->mac.type == ixgbe_mac_82598EB)
3418 		sc->stats.mprc -= bprc;
3419 
3420 	sc->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3421 	sc->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3422 	sc->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3423 	sc->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3424 	sc->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3425 	sc->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3426 	sc->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
3427 
3428 	lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3429 	sc->stats.lxontxc += lxon;
3430 	lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3431 	sc->stats.lxofftxc += lxoff;
3432 	total = lxon + lxoff;
3433 
3434 	sc->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
3435 	sc->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3436 	sc->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
3437 	sc->stats.gptc -= total;
3438 	sc->stats.mptc -= total;
3439 	sc->stats.ptc64 -= total;
3440 	sc->stats.gotc -= total * ETHER_MIN_LEN;
3441 
3442 	sc->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3443 	sc->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3444 	sc->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
3445 	sc->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3446 	sc->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3447 	sc->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3448 	sc->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3449 	sc->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3450 	sc->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
3451 	sc->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3452 #endif
3453 
3454 	/* Fill out the OS statistics structure */
3455 	ifp->if_collisions = 0;
3456 	ifp->if_oerrors = sc->watchdog_events;
3457 	ifp->if_ierrors = total_missed_rx + sc->stats.crcerrs + sc->stats.rlec;
3458 }
3459 
3460 #ifdef IX_DEBUG
3461 /**********************************************************************
3462  *
3463  *  This routine is called only when ixgbe_display_debug_stats is enabled.
3464  *  It provides a way to look at important statistics
3465  *  maintained by the driver and hardware.
3466  *
3467  **********************************************************************/
3468 void
3469 ixgbe_print_hw_stats(struct ix_softc * sc)
3470 {
3471 	struct ifnet   *ifp = &sc->arpcom.ac_if;
3472 
3473 	printf("%s: missed pkts %llu, rx len errs %llu, crc errs %llu, "
3474 	    "dropped pkts %lu, watchdog timeouts %ld, "
3475 	    "XON rx %llu, XON tx %llu, XOFF rx %llu, XOFF tx %llu, "
3476 	    "total pkts rx %llu, good pkts rx %llu, good pkts tx %llu, "
3477 	    "tso tx %lu\n",
3478 	    ifp->if_xname,
3479 	    (long long)sc->stats.mpc[0],
3480 	    (long long)sc->stats.roc + (long long)sc->stats.ruc,
3481 	    (long long)sc->stats.crcerrs,
3482 	    sc->dropped_pkts,
3483 	    sc->watchdog_events,
3484 	    (long long)sc->stats.lxonrxc,
3485 	    (long long)sc->stats.lxontxc,
3486 	    (long long)sc->stats.lxoffrxc,
3487 	    (long long)sc->stats.lxofftxc,
3488 	    (long long)sc->stats.tpr,
3489 	    (long long)sc->stats.gprc,
3490 	    (long long)sc->stats.gptc,
3491 	    sc->tso_tx);
3492 }
3493 #endif
3494